Merge branch 'master' into patch-2
This commit is contained in:
commit
305c6fc029
|
@ -0,0 +1,205 @@
|
|||
version: 2.1
|
||||
|
||||
orbs:
|
||||
slack: circleci/slack@3.4.2
|
||||
|
||||
executors:
|
||||
go:
|
||||
docker:
|
||||
- image: circleci/golang:1.15
|
||||
environment:
|
||||
CONSUL_VERSION: 1.7.2
|
||||
GOMAXPROCS: 4
|
||||
GO111MODULE: "on"
|
||||
GOPROXY: https://proxy.golang.org/
|
||||
TEST_RESULTS_DIR: &TEST_RESULTS_DIR /tmp/test-results
|
||||
ARTIFACTS_DIR: &ARTIFACTS_DIR /tmp/artifacts
|
||||
|
||||
jobs:
|
||||
go-checks:
|
||||
executor:
|
||||
name: go
|
||||
steps:
|
||||
- checkout
|
||||
- run: go mod verify
|
||||
- run: make fmtcheck generate
|
||||
- run:
|
||||
name: verify no code was generated
|
||||
command: |
|
||||
if [[ -z $(git status --porcelain) ]]; then
|
||||
echo "Git directory is clean."
|
||||
else
|
||||
echo "Git is dirty. Run 'make fmtcheck' and 'make generate' locally and commit any formatting fixes or generated code."
|
||||
git status --porcelain
|
||||
exit 1
|
||||
fi
|
||||
|
||||
go-test:
|
||||
executor:
|
||||
name: go
|
||||
environment:
|
||||
TF_CONSUL_TEST: 1
|
||||
parallelism: 4
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run:
|
||||
name: install consul
|
||||
command: |
|
||||
curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
|
||||
unzip consul.zip
|
||||
mkdir -p ~/bin
|
||||
mv consul ~/bin
|
||||
echo 'export PATH="~/bin:$PATH"'
|
||||
- run: mkdir -p $TEST_RESULTS_DIR
|
||||
- run:
|
||||
name: Run Go Tests
|
||||
command: |
|
||||
PACKAGE_NAMES=$(go list ./... | circleci tests split --split-by=timings --timings-type=classname)
|
||||
echo "Running $(echo $PACKAGE_NAMES | wc -w) packages"
|
||||
echo $PACKAGE_NAMES
|
||||
gotestsum --format=short-verbose --junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- -p 2 -cover -coverprofile=cov_$CIRCLE_NODE_INDEX.part $PACKAGE_NAMES
|
||||
|
||||
# save coverage report parts
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- cov_*.part
|
||||
|
||||
- store_test_results:
|
||||
path: *TEST_RESULTS_DIR
|
||||
- store_artifacts:
|
||||
path: *TEST_RESULTS_DIR
|
||||
|
||||
- slack/status:
|
||||
fail_only: true
|
||||
only_for_branches: master
|
||||
|
||||
go-test-e2e:
|
||||
executor:
|
||||
name: go
|
||||
environment:
|
||||
TF_ACC: 1
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run: mkdir -p $TEST_RESULTS_DIR
|
||||
- run:
|
||||
name: Run Go E2E Tests
|
||||
command: |
|
||||
gotestsum --format=short-verbose --junitfile $TEST_RESULTS_DIR/gotestsum-report.xml -- -p 2 -cover -coverprofile=cov_e2e.part ./command/e2etest ./tools/terraform-bundle/e2etest
|
||||
|
||||
# save coverage report parts
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- cov_*.part
|
||||
|
||||
- store_test_results:
|
||||
path: *TEST_RESULTS_DIR
|
||||
- store_artifacts:
|
||||
path: *TEST_RESULTS_DIR
|
||||
|
||||
- slack/status:
|
||||
fail_only: true
|
||||
only_for_branches: master
|
||||
|
||||
# combine code coverage results from the parallel circleci executors
|
||||
coverage-merge:
|
||||
executor:
|
||||
name: go
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run: mkdir -p $TEST_RESULTS_DIR
|
||||
- run:
|
||||
name: merge coverage reports
|
||||
command: |
|
||||
echo "mode: set" > coverage.out
|
||||
grep -h -v "mode: set" cov_*.part >> coverage.out
|
||||
go tool cover -html=coverage.out -o $TEST_RESULTS_DIR/coverage.html
|
||||
- run:
|
||||
name: codecov upload
|
||||
command: bash <(curl -s https://codecov.io/bash) -v -C $CIRCLE_SHA1
|
||||
- store_artifacts:
|
||||
path: *TEST_RESULTS_DIR
|
||||
|
||||
# build all distros
|
||||
build-distros: &build-distros
|
||||
executor: go
|
||||
environment: &build-env
|
||||
TF_RELEASE: 1
|
||||
steps:
|
||||
- run: go get -u github.com/mitchellh/gox # go get gox before detecting go mod
|
||||
- checkout
|
||||
- run: ./scripts/build.sh
|
||||
- run: mkdir -p $ARTIFACTS_DIR
|
||||
- run: cp pkg/*.zip /tmp/artifacts
|
||||
# save dev build to CircleCI
|
||||
- store_artifacts:
|
||||
path: *ARTIFACTS_DIR
|
||||
|
||||
# build all 386 architecture supported OS binaries
|
||||
build-386:
|
||||
<<: *build-distros
|
||||
environment:
|
||||
<<: *build-env
|
||||
XC_OS: "freebsd linux openbsd windows"
|
||||
XC_ARCH: "386"
|
||||
|
||||
# build all amd64 architecture supported OS binaries
|
||||
build-amd64:
|
||||
<<: *build-distros
|
||||
environment:
|
||||
<<: *build-env
|
||||
XC_OS: "darwin freebsd linux solaris windows"
|
||||
XC_ARCH: "amd64"
|
||||
|
||||
# build all arm architecture supported OS binaries
|
||||
build-arm:
|
||||
<<: *build-distros
|
||||
environment:
|
||||
<<: *build-env
|
||||
XC_OS: "freebsd linux"
|
||||
XC_ARCH: "arm"
|
||||
|
||||
test-docker-full:
|
||||
executor:
|
||||
name: go
|
||||
steps:
|
||||
- checkout
|
||||
- setup_remote_docker
|
||||
- run:
|
||||
name: test docker build for 'full' image
|
||||
command: docker build -t test-docker-full .
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test:
|
||||
jobs:
|
||||
- go-checks
|
||||
- go-test:
|
||||
requires:
|
||||
- go-checks
|
||||
- go-test-e2e:
|
||||
requires:
|
||||
- go-checks
|
||||
- coverage-merge:
|
||||
requires:
|
||||
- go-test
|
||||
- go-test-e2e
|
||||
- test-docker-full:
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /^v\d+\.\d+$/ # v0.11, v0.12, etc.
|
||||
|
||||
build-distros:
|
||||
jobs:
|
||||
- build-386
|
||||
- build-amd64
|
||||
- build-arm
|
|
@ -1,524 +1,235 @@
|
|||
# Contributing to Terraform
|
||||
|
||||
**First:** if you're unsure or afraid of _anything_, just ask
|
||||
or submit the issue or pull request anyways. You won't be yelled at for
|
||||
giving your best effort. The worst that can happen is that you'll be
|
||||
politely asked to change something. We appreciate any sort of contributions,
|
||||
and don't want a wall of rules to get in the way of that.
|
||||
This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
|
||||
|
||||
However, for those individuals who want a bit more guidance on the
|
||||
best way to contribute to the project, read on. This document will cover
|
||||
what we're looking for. By addressing all the points we're looking for,
|
||||
it raises the chances we can quickly merge or address your contributions.
|
||||
---
|
||||
|
||||
Specifically, we have provided checklists below for each type of issue and pull
|
||||
request that can happen on the project. These checklists represent everything
|
||||
we need to be able to review and respond quickly.
|
||||
**All communication on GitHub, the community forum, and other HashiCorp-provided communication channels is subject to [the HashiCorp community guidelines](https://www.hashicorp.com/community-guidelines).**
|
||||
|
||||
## HashiCorp vs. Community Providers
|
||||
This document provides guidance on Terraform contribution recommended practices. It covers what we're looking for in order to help set some expectations and help you get the most out of participation in this project.
|
||||
|
||||
We separate providers out into what we call "HashiCorp Providers" and
|
||||
"Community Providers".
|
||||
To record a bug report, enhancement proposal, or give any other product feedback, please [open a GitHub issue](https://github.com/hashicorp/terraform/issues/new/choose) using the most appropriate issue template. Please do fill in all of the information the issue templates request, because we've seen from experience that this will maximize the chance that we'll be able to act on your feedback.
|
||||
|
||||
HashiCorp providers are providers that we'll dedicate full time resources to
|
||||
improving, supporting the latest features, and fixing bugs. These are providers
|
||||
we understand deeply and are confident we have the resources to manage
|
||||
ourselves.
|
||||
---
|
||||
|
||||
Community providers are providers where we depend on the community to
|
||||
contribute fixes and enhancements to improve. HashiCorp will run automated
|
||||
tests and ensure these providers continue to work, but will not dedicate full
|
||||
time resources to add new features to these providers. These providers are
|
||||
available in official Terraform releases, but the functionality is primarily
|
||||
contributed.
|
||||
<!-- MarkdownTOC autolink="true" -->
|
||||
|
||||
The current list of HashiCorp Providers is as follows:
|
||||
- [Contributing Fixes](#contributing-fixes)
|
||||
- [Proposing a Change](#proposing-a-change)
|
||||
- [Caveats & areas of special concern](#caveats--areas-of-special-concern)
|
||||
- [State Storage Backends](#state-storage-backends)
|
||||
- [Provisioners](#provisioners)
|
||||
- [Maintainers](#maintainers)
|
||||
- [Pull Request Lifecycle](#pull-request-lifecycle)
|
||||
- [Getting Your Pull Requests Merged Faster](#getting-your-pull-requests-merged-faster)
|
||||
- [PR Checks](#pr-checks)
|
||||
- [Terraform CLI/Core Development Environment](#terraform-clicore-development-environment)
|
||||
- [Acceptance Tests: Testing interactions with external services](#acceptance-tests-testing-interactions-with-external-services)
|
||||
- [Generated Code](#generated-code)
|
||||
- [External Dependencies](#external-dependencies)
|
||||
|
||||
* `aws`
|
||||
* `azurerm`
|
||||
* `google`
|
||||
* `opc`
|
||||
<!-- /MarkdownTOC -->
|
||||
|
||||
Our testing standards are the same for both HashiCorp and Community providers,
|
||||
and HashiCorp runs full acceptance test suites for every provider nightly to
|
||||
ensure Terraform remains stable.
|
||||
## Contributing Fixes
|
||||
|
||||
We make the distinction between these two types of providers to help
|
||||
highlight the vast amounts of community effort that goes in to making Terraform
|
||||
great, and to help contributors better understand the role HashiCorp employees
|
||||
play in the various areas of the code base.
|
||||
It can be tempting to want to dive into an open source project and help _build the thing_ you believe you're missing. It's a wonderful and helpful intention. However, Terraform is a complex tool. Many seemingly simple changes can have serious effects on other areas of the code and it can take some time to become familiar with the effects of even basic changes. The Terraform team is not immune to unintended and sometimes undesirable changes. We do take our work seriously, and appreciate the globally diverse community that relies on Terraform for workflows of all sizes and criticality.
|
||||
|
||||
## Issues
|
||||
As a result of Terraform's complexity and high bar for stability, the most straightforward way to start helping with the Terraform project is to pick an existing bug and [get to work](#terraform-clicore-development-environment).
|
||||
|
||||
### Issue Reporting Checklists
|
||||
For new contributors we've labeled a few issues with `Good First Issue` as a nod to issues which will help get you familiar with Terraform development, while also providing an onramp to the codebase itself.
|
||||
|
||||
We welcome issues of all kinds including feature requests, bug reports, and
|
||||
general questions. Below you'll find checklists with guidelines for well-formed
|
||||
issues of each type.
|
||||
Read the documentation, and don't be afraid to [ask questions](https://discuss.hashicorp.com/c/terraform-core/27).
|
||||
|
||||
#### Bug Reports
|
||||
## Proposing a Change
|
||||
|
||||
- [ ] __Test against latest release__: Make sure you test against the latest
|
||||
released version. It is possible we already fixed the bug you're experiencing.
|
||||
In order to be respectful of the time of community contributors, we aim to discuss potential changes in GitHub issues prior to implementation. That will allow us to give design feedback up front and set expectations about the scope of the change, and, for larger changes, how best to approach the work such that the Terraform team can review it and merge it along with other concurrent work.
|
||||
|
||||
- [ ] __Search for possible duplicate reports__: It's helpful to keep bug
|
||||
reports consolidated to one thread, so do a quick search on existing bug
|
||||
reports to check if anybody else has reported the same thing. You can scope
|
||||
searches by the label "bug" to help narrow things down.
|
||||
If the bug you wish to fix or enhancement you wish to implement isn't already covered by a GitHub issue that contains feedback from the Terraform team, please do start a discussion (either in [a new GitHub issue](https://github.com/hashicorp/terraform/issues/new/choose) or an existing one, as appropriate) before you invest significant development time. If you mention your intent to implement the change described in your issue, the Terraform team can, as best as possible, prioritize including implementation-related feedback in the subsequent discussion.
|
||||
|
||||
- [ ] __Include steps to reproduce__: Provide steps to reproduce the issue,
|
||||
along with your `.tf` files, with secrets removed, so we can try to
|
||||
reproduce it. Without this, it makes it much harder to fix the issue.
|
||||
At this time, we do not have a formal process for reviewing outside proposals that significantly change Terraform's workflow, its primary usage patterns, and its language. Additionally, some seemingly simple proposals can have deep effects across Terraform, which is why we strongly suggest starting with an issue-based proposal.
|
||||
|
||||
- [ ] __For panics, include `crash.log`__: If you experienced a panic, please
|
||||
create a [gist](https://gist.github.com) of the *entire* generated crash log
|
||||
for us to look at. Double check no sensitive items were in the log.
|
||||
For large proposals that could entail a significant design phase, we wish to be up front with potential contributors that, unfortunately, we are unlikely to be able to give prompt feedback. We are still interested to hear about your use-cases so that we can consider ways to meet them as part of other larger projects.
|
||||
|
||||
#### Feature Requests
|
||||
Most changes will involve updates to the test suite, and changes to Terraform's documentation. The Terraform team can advise on different testing strategies for specific scenarios, and may ask you to revise the specific phrasing of your proposed documentation prose to match better with the standard "voice" of Terraform's documentation.
|
||||
|
||||
- [ ] __Search for possible duplicate requests__: It's helpful to keep requests
|
||||
consolidated to one thread, so do a quick search on existing requests to
|
||||
check if anybody else has reported the same thing. You can scope searches by
|
||||
the label "enhancement" to help narrow things down.
|
||||
This repository is primarily maintained by a small team at HashiCorp along with their other responsibilities, so unfortunately we cannot always respond promptly to pull requests, particularly if they do not relate to an existing GitHub issue where the Terraform team has already participated and indicated willingness to work on the issue or accept PRs for the proposal. We *are* grateful for all contributions however, and will give feedback on pull requests as soon as we're able.
|
||||
|
||||
- [ ] __Include a use case description__: In addition to describing the
|
||||
behavior of the feature you'd like to see added, it's helpful to also lay
|
||||
out the reason why the feature would be important and how it would benefit
|
||||
Terraform users.
|
||||
### Caveats & areas of special concern
|
||||
|
||||
#### Questions
|
||||
There are some areas of Terraform which are of special concern to the Terraform team.
|
||||
|
||||
- [ ] __Search for answers in Terraform documentation__: We're happy to answer
|
||||
questions in GitHub Issues, but it helps reduce issue churn and maintainer
|
||||
workload if you work to find answers to common questions in the
|
||||
documentation. Oftentimes Question issues result in documentation updates
|
||||
to help future users, so if you don't find an answer, you can give us
|
||||
pointers for where you'd expect to see it in the docs.
|
||||
#### State Storage Backends
|
||||
|
||||
### Issue Lifecycle
|
||||
The Terraform team is not merging PRs for new state storage backends at the current time. Our priority regarding state storage backends is to find maintainers for existing backends and remove those backends without maintainers.
|
||||
|
||||
1. The issue is reported.
|
||||
Please see the [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file for the status of a given backend. Community members with an interest in a particular standard backend are welcome to help maintain it.
|
||||
|
||||
2. The issue is verified and categorized by a Terraform collaborator.
|
||||
Categorization is done via GitHub labels. We generally use a two-label
|
||||
system of (1) issue/PR type, and (2) section of the codebase. Type is
|
||||
usually "bug", "enhancement", "documentation", or "question", and section
|
||||
can be any of the providers or provisioners or "core".
|
||||
Currently, merging state storage backends places a significant burden on the Terraform team. The team must setup an environment and cloud service provider account, or a new database/storage/key-value service, in order to build and test remote state storage backends. The time and complexity of doing so prevents us from moving Terraform forward in other ways.
|
||||
|
||||
3. Unless it is critical, the issue is left for a period of time (sometimes
|
||||
many weeks), giving outside contributors a chance to address the issue.
|
||||
We are working to remove ourselves from the critical path of state storage backends by moving them towards a plugin model. In the meantime, we won't be accepting new remote state backends into Terraform.
|
||||
|
||||
4. The issue is addressed in a pull request or commit. The issue will be
|
||||
referenced in the commit message so that the code that fixes it is clearly
|
||||
linked.
|
||||
#### Provisioners
|
||||
|
||||
5. The issue is closed. Sometimes, valid issues will be closed to keep
|
||||
the issue tracker clean. The issue is still indexed and available for
|
||||
future viewers, or can be re-opened if necessary.
|
||||
Provisioners are an area of concern in Terraform for a number of reasons. Chiefly, they are often used in the place of configuration management tools or custom providers.
|
||||
|
||||
## Pull Requests
|
||||
From our [documentation](https://www.terraform.io/docs/provisioners/index.html):
|
||||
|
||||
Thank you for contributing! Here you'll find information on what to include in
|
||||
your Pull Request to ensure it is accepted quickly.
|
||||
> ... they [...] add a considerable amount of complexity and uncertainty to Terraform usage.[...] we still recommend attempting to solve it [your problem] using other techniques first, and use provisioners only if there is no other option.
|
||||
|
||||
* For pull requests that follow the guidelines, we expect to be able to review
|
||||
and merge very quickly.
|
||||
* Pull requests that don't follow the guidelines will be annotated with what
|
||||
they're missing. A community or core team member may be able to swing around
|
||||
and help finish up the work, but these PRs will generally hang out much
|
||||
longer until they can be completed and merged.
|
||||
The Terraform team is in the process of building a way forward which continues to decrease reliance on provisioners. In the mean time however, as our documentation indicates, they are a tool of last resort. As such expect that PRs and issues for provisioners are not high in priority.
|
||||
|
||||
Please see the [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file for the status of a given provisioner. Community members with an interest in a particular provisioner are welcome to help maintain it.
|
||||
|
||||
#### Maintainers
|
||||
|
||||
Maintainers are key contributors to our Open Source project. They contribute their time and expertise and we ask that the community take extra special care to be mindful of this when interacting with them.
|
||||
|
||||
For code that has a listed maintainer or maintainers in our [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file, the Terraform team will highlight them for participation in PRs which relate to the area of code they maintain. The expectation is that a maintainer will review the code and work with the PR contributor before the code is merged by the Terraform team.
|
||||
|
||||
There is no expectation on response time for our maintainers; they may be indisposed for prolonged periods of time. Please be patient. Discussions on when code becomes "unmaintained" will be on a case-by-case basis.
|
||||
|
||||
If an unmaintained area of code interests you and you'd like to become a maintainer, you may simply make a PR against our [CODEOWNERS](https://github.com/hashicorp/terraform/blob/master/CODEOWNERS) file with your GitHub handle attached to the appropriate area. If there is a maintainer or team of maintainers for that area, please coordinate with them as necessary.
|
||||
|
||||
### Pull Request Lifecycle
|
||||
|
||||
1. You are welcome to submit your pull request for commentary or review before
|
||||
it is fully completed. Please prefix the title of your pull request with
|
||||
"[WIP]" to indicate this. It's also a good idea to include specific
|
||||
questions or items you'd like feedback on.
|
||||
1. You are welcome to submit a [draft pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/) for commentary or review before it is fully completed. It's also a good idea to include specific questions or items you'd like feedback on.
|
||||
2. Once you believe your pull request is ready to be merged you can create your pull request.
|
||||
3. When time permits Terraform's core team members will look over your contribution and either merge, or provide comments letting you know if there is anything left to do. It may take some time for us to respond. We may also have questions that we need answered about the code, either because something doesn't make sense to us or because we want to understand your thought process. We kindly ask that you do not target specific team members.
|
||||
4. If we have requested changes, you can either make those changes or, if you disagree with the suggested changes, we can have a conversation about our reasoning and agree on a path forward. This may be a multi-step process. Our view is that pull requests are a chance to collaborate, and we welcome conversations about how to do things better. It is the contributor's responsibility to address any changes requested. While reviewers are happy to give guidance, it is unsustainable for us to perform the coding work necessary to get a PR into a mergeable state.
|
||||
5. Once all outstanding comments and checklist items have been addressed, your contribution will be merged! Merged PRs may or may not be included in the next release based on changes the Terraform teams deems as breaking or not. The core team takes care of updating the [CHANGELOG.md](https://github.com/hashicorp/terraform/blob/master/CHANGELOG.md) as they merge.
|
||||
6. In some cases, we might decide that a PR should be closed without merging. We'll make sure to provide clear reasoning when this happens. Following the recommended process above is one of the ways to ensure you don't spend time on a PR we can't or won't merge.
|
||||
|
||||
2. Once you believe your pull request is ready to be merged, you can remove any
|
||||
"[WIP]" prefix from the title and a core team member will review. Follow
|
||||
[the checklists below](#checklists-for-contribution) to help ensure that
|
||||
your contribution will be merged quickly.
|
||||
#### Getting Your Pull Requests Merged Faster
|
||||
|
||||
3. One of Terraform's core team members will look over your contribution and
|
||||
either provide comments letting you know if there is anything left to do. We
|
||||
do our best to provide feedback in a timely manner, but it may take some
|
||||
time for us to respond.
|
||||
It is much easier to review pull requests that are:
|
||||
|
||||
4. Once all outstanding comments and checklist items have been addressed, your
|
||||
contribution will be merged! Merged PRs will be included in the next
|
||||
Terraform release. The core team takes care of updating the CHANGELOG as
|
||||
they merge.
|
||||
1. Well-documented: Try to explain in the pull request comments what your change does, why you have made the change, and provide instructions for how to produce the new behavior introduced in the pull request. If you can, provide screen captures or terminal output to show what the changes look like. This helps the reviewers understand and test the change.
|
||||
2. Small: Try to only make one change per pull request. If you found two bugs and want to fix them both, that's *awesome*, but it's still best to submit the fixes as separate pull requests. This makes it much easier for reviewers to keep in their heads all of the implications of individual code changes, and that means the PR takes less effort and energy to merge. In general, the smaller the pull request, the sooner reviewers will be able to make time to review it.
|
||||
3. Passing Tests: Based on how much time we have, we may not review pull requests which aren't passing our tests (look below for advice on how to run unit tests). If you need help figuring out why tests are failing, please feel free to ask, but while we're happy to give guidance it is generally your responsibility to make sure that tests are passing. If your pull request changes an interface or invalidates an assumption that causes a bunch of tests to fail, then you need to fix those tests before we can merge your PR.
|
||||
|
||||
5. In rare cases, we might decide that a PR should be closed. We'll make sure
|
||||
to provide clear reasoning when this happens.
|
||||
If we request changes, try to make those changes in a timely manner. Otherwise, PRs can go stale and be a lot more work for all of us to merge in the future.
|
||||
|
||||
### Checklists for Contribution
|
||||
Even with everyone making their best effort to be responsive, it can be time-consuming to get a PR merged. It can be frustrating to deal with the back-and-forth as we make sure that we understand the changes fully. Please bear with us, and please know that we appreciate the time and energy you put into the project.
|
||||
|
||||
There are several different kinds of contribution, each of which has its own
|
||||
standards for a speedy review. The following sections describe guidelines for
|
||||
each type of contribution.
|
||||
### PR Checks
|
||||
|
||||
#### Documentation Update
|
||||
The following checks run when a PR is opened:
|
||||
|
||||
Because [Terraform's website][website] is in the same repo as the code, it's
|
||||
easy for anybody to help us improve our docs.
|
||||
- Contributor License Agreement (CLA): If this is your first contribution to Terraform you will be asked to sign the CLA.
|
||||
- Tests: tests include unit tests and acceptance tests, and all tests must pass before a PR can be merged.
|
||||
- Test Coverage Report: We use [codecov](https://codecov.io/) to check both overall test coverage, and patch coverage.
|
||||
|
||||
- [ ] __Reasoning for docs update__: Including a quick explanation for why the
|
||||
update needed is helpful for reviewers.
|
||||
- [ ] __Relevant Terraform version__: Is this update worth deploying to the
|
||||
site immediately, or is it referencing an upcoming version of Terraform and
|
||||
should get pushed out with the next release?
|
||||
-> **Note:** We are still deciding on the right targets for our code coverage check. A failure in `codecov` does not necessarily mean that your PR will not be approved or merged.
|
||||
|
||||
#### Enhancement/Bugfix to a Resource
|
||||
----
|
||||
|
||||
Working on existing resources is a great way to get started as a Terraform
|
||||
contributor because you can work within existing code and tests to get a feel
|
||||
for what to do.
|
||||
## Terraform CLI/Core Development Environment
|
||||
|
||||
- [ ] __Acceptance test coverage of new behavior__: Existing resources each
|
||||
have a set of [acceptance tests][acctests] covering their functionality.
|
||||
These tests should exercise all the behavior of the resource. Whether you are
|
||||
adding something or fixing a bug, the idea is to have an acceptance test that
|
||||
fails if your code were to be removed. Sometimes it is sufficient to
|
||||
"enhance" an existing test by adding an assertion or tweaking the config
|
||||
that is used, but often a new test is better to add. You can copy/paste an
|
||||
existing test and follow the conventions you see there, modifying the test
|
||||
to exercise the behavior of your code.
|
||||
- [ ] __Documentation updates__: If your code makes any changes that need to
|
||||
be documented, you should include those doc updates in the same PR. The
|
||||
[Terraform website][website] source is in this repo and includes
|
||||
instructions for getting a local copy of the site up and running if you'd
|
||||
like to preview your changes.
|
||||
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
|
||||
see in the codebase, and ensure your code is formatted with `go fmt`. (The
|
||||
Travis CI build will fail if `go fmt` has not been run on incoming code.)
|
||||
The PR reviewers can help out on this front, and may provide comments with
|
||||
suggestions on how to improve the code.
|
||||
This repository contains the source code for Terraform CLI, which is the main component of Terraform that contains the core Terraform engine.
|
||||
|
||||
#### New Resource
|
||||
The HashiCorp-maintained Terraform providers are also open source but are not in this repository; instead, they are each in their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub.
|
||||
|
||||
Implementing a new resource is a good way to learn more about how Terraform
|
||||
interacts with upstream APIs. There are plenty of examples to draw from in the
|
||||
existing resources, but you still get to implement something completely new.
|
||||
This repository also does not include the source code for some other parts of the Terraform product including Terraform Cloud, Terraform Enterprise, and the Terraform Registry. Those components are not open source, though if you have feedback about them (including bug reports) please do feel free to [open a GitHub issue on this repository](https://github.com/hashicorp/terraform/issues/new/choose).
|
||||
|
||||
- [ ] __Minimal LOC__: It can be inefficient for both the reviewer
|
||||
and author to go through long feedback cycles on a big PR with many
|
||||
resources. We therefore encourage you to only submit **1 resource at a time**.
|
||||
- [ ] __Acceptance tests__: New resources should include acceptance tests
|
||||
covering their behavior. See [Writing Acceptance
|
||||
Tests](#writing-acceptance-tests) below for a detailed guide on how to
|
||||
approach these.
|
||||
- [ ] __Documentation__: Each resource gets a page in the Terraform
|
||||
documentation. The [Terraform website][website] source is in this
|
||||
repo and includes instructions for getting a local copy of the site up and
|
||||
running if you'd like to preview your changes. For a resource, you'll want
|
||||
to add a new file in the appropriate place and add a link to the sidebar for
|
||||
that page.
|
||||
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
|
||||
see in the codebase, and ensure your code is formatted with `go fmt`. (The
|
||||
Travis CI build will fail if `go fmt` has not been run on incoming code.)
|
||||
The PR reviewers can help out on this front, and may provide comments with
|
||||
suggestions on how to improve the code.
|
||||
---
|
||||
|
||||
#### New Provider
|
||||
If you wish to work on the Terraform CLI source code, you'll first need to install the [Go](https://golang.org/) compiler and the version control system [Git](https://git-scm.com/).
|
||||
|
||||
Implementing a new provider gives Terraform the ability to manage resources in
|
||||
a whole new API. It's a larger undertaking, but brings major new functionality
|
||||
into Terraform.
|
||||
At this time the Terraform development environment is targeting only Linux and Mac OS X systems. While Terraform itself is compatible with Windows, unfortunately the unit test suite currently contains Unix-specific assumptions around maximum path lengths, path separators, etc.
|
||||
|
||||
- [ ] __Minimal initial LOC__: Some providers may be big and it can be
|
||||
inefficient for both reviewer & author to go through long feedback cycles
|
||||
on a big PR with many resources. We encourage you to only submit
|
||||
the necessary minimum in a single PR, ideally **just the first resource**
|
||||
of the provider.
|
||||
- [ ] __Acceptance tests__: Each provider should include an acceptance test
|
||||
suite with tests for each resource in the provider, covering
|
||||
its behavior. See [Writing Acceptance Tests](#writing-acceptance-tests) below
|
||||
for a detailed guide on how to approach these.
|
||||
- [ ] __Documentation__: Each provider has a section in the Terraform
|
||||
documentation. The [Terraform website][website] source is in this repo and
|
||||
includes instructions for getting a local copy of the site up and running if
|
||||
you'd like to preview your changes. For a provider, you'll want to add a new
|
||||
index file and individual pages for each resource.
|
||||
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
|
||||
see in the codebase, and ensure your code is formatted with `go fmt`. (The
|
||||
Travis CI build will fail if `go fmt` has not been run on incoming code.)
|
||||
The PR reviewers can help out on this front, and may provide comments with
|
||||
suggestions on how to improve the code.
|
||||
Refer to the file [`.go-version`](https://github.com/hashicorp/terraform/blob/master/.go-version) to see which version of Go Terraform is currently built with. Other versions will often work, but if you run into any build or testing problems please try with the specific Go version indicated. You can optionally simplify the installation of multiple specific versions of Go on your system by installing [`goenv`](https://github.com/syndbg/goenv), which reads `.go-version` and automatically selects the correct Go version.
|
||||
|
||||
#### Core Bugfix/Enhancement
|
||||
Use Git to clone this repository into a location of your choice. Terraform is using [Go Modules](https://blog.golang.org/using-go-modules), and so you should *not* clone it inside your `GOPATH`.
|
||||
|
||||
We are always happy when any developer is interested in diving into Terraform's
|
||||
core to help out! Here's what we look for in smaller Core PRs.
|
||||
Switch into the root directory of the cloned repository and build Terraform using the Go toolchain in the standard way:
|
||||
|
||||
- [ ] __Unit tests__: Terraform's core is covered by hundreds of unit tests at
|
||||
several different layers of abstraction. Generally the best place to start
|
||||
is with a "Context Test". These are higher level tests that interact
|
||||
end-to-end with most of Terraform's core. They are divided into test files
|
||||
for each major action (plan, apply, etc.). Getting a failing test is a great
|
||||
way to prove out a bug report or a new enhancement. With a context test in
|
||||
place, you can work on implementation and lower level unit tests. Lower
|
||||
level tests are largely context dependent, but the Context Tests are almost
|
||||
always part of core work.
|
||||
- [ ] __Documentation updates__: If the core change involves anything that
|
||||
needs to be reflected in our documentation, you can make those changes in
|
||||
the same PR. The [Terraform website][website] source is in this repo and
|
||||
includes instructions for getting a local copy of the site up and running if
|
||||
you'd like to preview your changes.
|
||||
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
|
||||
see in the codebase, and ensure your code is formatted with `go fmt`. (The
|
||||
Travis CI build will fail if `go fmt` has not been run on incoming code.)
|
||||
The PR reviewers can help out on this front, and may provide comments with
|
||||
suggestions on how to improve the code.
|
||||
|
||||
#### Core Feature
|
||||
|
||||
If you're interested in taking on a larger core feature, it's a good idea to
|
||||
get feedback early and often on the effort.
|
||||
|
||||
- [ ] __Early validation of idea and implementation plan__: Terraform's core
|
||||
is complicated enough that there are often several ways to implement
|
||||
something, each of which has different implications and tradeoffs. Working
|
||||
through a plan of attack with the team before you dive into implementation
|
||||
will help ensure that you're working in the right direction.
|
||||
- [ ] __Unit tests__: Terraform's core is covered by hundreds of unit tests at
|
||||
several different layers of abstraction. Generally the best place to start
|
||||
is with a "Context Test". These are higher level tests that interact
|
||||
end-to-end with most of Terraform's core. They are divided into test files
|
||||
for each major action (plan, apply, etc.). Getting a failing test is a great
|
||||
way to prove out a bug report or a new enhancement. With a context test in
|
||||
place, you can work on implementation and lower level unit tests. Lower
|
||||
level tests are largely context dependent, but the Context Tests are almost
|
||||
always part of core work.
|
||||
- [ ] __Documentation updates__: If the core change involves anything that
|
||||
needs to be reflected in our documentation, you can make those changes in
|
||||
the same PR. The [Terraform website][website] source is in this repo and
|
||||
includes instructions for getting a local copy of the site up and running if
|
||||
you'd like to preview your changes.
|
||||
- [ ] __Well-formed Code__: Do your best to follow existing conventions you
|
||||
see in the codebase, and ensure your code is formatted with `go fmt`. (The
|
||||
Travis CI build will fail if `go fmt` has not been run on incoming code.)
|
||||
The PR reviewers can help out on this front, and may provide comments with
|
||||
suggestions on how to improve the code.
|
||||
|
||||
### Writing Acceptance Tests
|
||||
|
||||
Terraform includes an acceptance test harness that does most of the repetitive
|
||||
work involved in testing a resource.
|
||||
|
||||
#### Acceptance Tests Often Cost Money to Run
|
||||
|
||||
Because acceptance tests create real resources, they often cost money to run.
|
||||
Because the resources only exist for a short period of time, the total amount
|
||||
of money required is usually relatively small. Nevertheless, we don't want
|
||||
financial limitations to be a barrier to contribution, so if you are unable to
|
||||
pay to run acceptance tests for your contribution, simply mention this in your
|
||||
pull request. We will happily accept "best effort" implementations of
|
||||
acceptance tests and run them for you on our side. This might mean that your PR
|
||||
takes a bit longer to merge, but it most definitely is not a blocker for
|
||||
contributions.
|
||||
|
||||
#### Running an Acceptance Test
|
||||
|
||||
Acceptance tests can be run using the `testacc` target in the Terraform
|
||||
`Makefile`. The individual tests to run can be controlled using a regular
|
||||
expression. Prior to running the tests provider configuration details such as
|
||||
access keys must be made available as environment variables.
|
||||
|
||||
For example, to run an acceptance test against the Azure Resource Manager
|
||||
provider, the following environment variables must be set:
|
||||
|
||||
```sh
|
||||
export ARM_SUBSCRIPTION_ID=...
|
||||
export ARM_CLIENT_ID=...
|
||||
export ARM_CLIENT_SECRET=...
|
||||
export ARM_TENANT_ID=...
|
||||
```
|
||||
cd terraform
|
||||
go install .
|
||||
```
|
||||
|
||||
Tests can then be run by specifying the target provider and a regular
|
||||
expression defining the tests to run:
|
||||
The first time you run the `go install` command, the Go toolchain will download any library dependencies that you don't already have in your Go modules cache. Subsequent builds will be faster because these dependencies will already be available on your local disk.
|
||||
|
||||
```sh
|
||||
$ make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMPublicIpStatic_update'
|
||||
==> Checking that code complies with gofmt requirements...
|
||||
Once the compilation process succeeds, you can find a `terraform` executable in the Go executable directory. If you haven't overridden it with the `GOBIN` environment variable, the executable directory is the `bin` directory inside the directory returned by the following command:
|
||||
|
||||
```
|
||||
go env GOPATH
|
||||
```
|
||||
|
||||
If you are planning to make changes to the Terraform source code, you should run the unit test suite before you start to make sure everything is initially passing:
|
||||
|
||||
```
|
||||
go test ./...
|
||||
```
|
||||
|
||||
As you make your changes, you can re-run the above command to ensure that the tests are *still* passing. If you are working only on a specific Go package, you can speed up your testing cycle by testing only that single package, or packages under a particular package prefix:
|
||||
|
||||
```
|
||||
go test ./command/...
|
||||
go test ./addrs
|
||||
```
|
||||
|
||||
## Acceptance Tests: Testing interactions with external services
|
||||
|
||||
Terraform's unit test suite is self-contained, using mocks and local files to help ensure that it can run offline and is unlikely to be broken by changes to outside systems.
|
||||
|
||||
However, several Terraform components interact with external services, such as the automatic provider installation mechanism, the Terraform Registry, Terraform Cloud, etc.
|
||||
|
||||
There are some optional tests in the Terraform CLI codebase that *do* interact with external services, which we collectively refer to as "acceptance tests". You can enable these by setting the environment variable `TF_ACC=1` when running the tests. We recommend focusing only on the specific package you are working on when enabling acceptance tests, both because it can help the test run to complete faster and because you are less likely to encounter failures due to drift in systems unrelated to your current goal:
|
||||
|
||||
```
|
||||
TF_ACC=1 go test ./internal/initwd
|
||||
```
|
||||
|
||||
Because the acceptance tests depend on services outside of the Terraform codebase, and because the acceptance tests are usually used only when making changes to the systems they cover, it is common and expected that drift in those external systems will cause test failures. Because of this, prior to working on a system covered by acceptance tests it's important to run the existing tests for that system in an *unchanged* work tree first and respond to any test failures that preexist, to avoid misinterpreting such failures as bugs in your new changes.
|
||||
|
||||
## Generated Code
|
||||
|
||||
Some files in the Terraform CLI codebase are generated. In most cases, we update these using `go generate`, which is the standard way to encapsulate code generation steps in a Go codebase.
|
||||
|
||||
```
|
||||
go generate ./...
|
||||
TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMPublicIpStatic_update -timeout 120m
|
||||
=== RUN TestAccAzureRMPublicIpStatic_update
|
||||
--- PASS: TestAccAzureRMPublicIpStatic_update (177.48s)
|
||||
PASS
|
||||
ok github.com/hashicorp/terraform/builtin/providers/azurerm 177.504s
|
||||
```
|
||||
|
||||
Entire resource test suites can be targeted by using the naming convention to
|
||||
write the regular expression. For example, to run all tests of the
|
||||
`azurerm_public_ip` resource rather than just the update test, you can start
|
||||
testing like this:
|
||||
Use `git diff` afterwards to inspect the changes and ensure that they are what you expected.
|
||||
|
||||
```sh
|
||||
$ make testacc TEST=./builtin/providers/azurerm TESTARGS='-run=TestAccAzureRMPublicIpStatic'
|
||||
==> Checking that code complies with gofmt requirements...
|
||||
go generate ./...
|
||||
TF_ACC=1 go test ./builtin/providers/azurerm -v -run=TestAccAzureRMPublicIpStatic -timeout 120m
|
||||
=== RUN TestAccAzureRMPublicIpStatic_basic
|
||||
--- PASS: TestAccAzureRMPublicIpStatic_basic (137.74s)
|
||||
=== RUN TestAccAzureRMPublicIpStatic_update
|
||||
--- PASS: TestAccAzureRMPublicIpStatic_update (180.63s)
|
||||
PASS
|
||||
ok github.com/hashicorp/terraform/builtin/providers/azurerm 318.392s
|
||||
Terraform includes generated Go stub code for the Terraform provider plugin protocol, which is defined using Protocol Buffers. Because the Protocol Buffers tools are not written in Go and thus cannot be automatically installed using `go get`, we follow a different process for generating these, which requires that you've already installed a suitable version of `protoc`:
|
||||
|
||||
```
|
||||
make protobuf
|
||||
```
|
||||
|
||||
#### Writing an Acceptance Test
|
||||
## External Dependencies
|
||||
|
||||
Terraform has a framework for writing acceptance tests which minimises the
|
||||
amount of boilerplate code necessary to use common testing patterns. The entry
|
||||
point to the framework is the `resource.Test()` function.
|
||||
Terraform uses Go Modules for dependency management.
|
||||
|
||||
Tests are divided into `TestStep`s. Each `TestStep` proceeds by applying some
|
||||
Terraform configuration using the provider under test, and then verifying that
|
||||
results are as expected by making assertions using the provider API. It is
|
||||
common for a single test function to exercise both the creation of and updates
|
||||
to a single resource. Most tests follow a similar structure.
|
||||
Our dependency licensing policy for Terraform excludes proprietary licenses and "copyleft"-style licenses. We accept the common Mozilla Public License v2, MIT License, and BSD licenses. We will consider other open source licenses in similar spirit to those three, but if you plan to include such a dependency in a contribution we'd recommend opening a GitHub issue first to discuss what you intend to implement and what dependencies it will require so that the Terraform team can review the relevant licenses to determine whether they meet our licensing needs.
|
||||
|
||||
1. Pre-flight checks are made to ensure that sufficient provider configuration
|
||||
is available to be able to proceed - for example in an acceptance test
|
||||
targeting AWS, `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` must be set prior
|
||||
to running acceptance tests. This is common to all tests exercising a single
|
||||
provider.
|
||||
If you need to add a new dependency to Terraform or update the selected version for an existing one, use `go get` from the root of the Terraform repository as follows:
|
||||
|
||||
Each `TestStep` is defined in the call to `resource.Test()`. Most assertion
|
||||
functions are defined out of band with the tests. This keeps the tests
|
||||
readable, and allows reuse of assertion functions across different tests of the
|
||||
same type of resource. The definition of a complete test looks like this:
|
||||
|
||||
```go
|
||||
func TestAccAzureRMPublicIpStatic_update(t *testing.T) {
|
||||
resource.Test(t, resource.TestCase{
|
||||
PreCheck: func() { testAccPreCheck(t) },
|
||||
Providers: testAccProviders,
|
||||
CheckDestroy: testCheckAzureRMPublicIpDestroy,
|
||||
Steps: []resource.TestStep{
|
||||
resource.TestStep{
|
||||
Config: testAccAzureRMVPublicIpStatic_basic,
|
||||
Check: resource.ComposeTestCheckFunc(
|
||||
testCheckAzureRMPublicIpExists("azurerm_public_ip.test"),
|
||||
),
|
||||
},
|
||||
},
|
||||
})
|
||||
}
|
||||
```
|
||||
go get github.com/hashicorp/hcl/v2@2.0.0
|
||||
```
|
||||
|
||||
When executing the test, the following steps are taken for each `TestStep`:
|
||||
This command will download the requested version (2.0.0 in the above example) and record that version selection in the `go.mod` file. It will also record checksums for the module in the `go.sum`.
|
||||
|
||||
1. The Terraform configuration required for the test is applied. This is
|
||||
responsible for configuring the resource under test, and any dependencies it
|
||||
may have. For example, to test the `azurerm_public_ip` resource, an
|
||||
`azurerm_resource_group` is required. This results in configuration which
|
||||
looks like this:
|
||||
To complete the dependency change, clean up any redundancy in the module metadata files by running:
|
||||
|
||||
```hcl
|
||||
resource "azurerm_resource_group" "test" {
|
||||
name = "acceptanceTestResourceGroup1"
|
||||
location = "West US"
|
||||
}
|
||||
```
|
||||
go mod tidy
|
||||
```
|
||||
|
||||
resource "azurerm_public_ip" "test" {
|
||||
name = "acceptanceTestPublicIp1"
|
||||
location = "West US"
|
||||
resource_group_name = "${azurerm_resource_group.test.name}"
|
||||
public_ip_address_allocation = "static"
|
||||
}
|
||||
```
|
||||
To ensure that the upgrade has worked correctly, be sure to run the unit test suite at least once:
|
||||
|
||||
1. Assertions are run using the provider API. These use the provider API
|
||||
directly rather than asserting against the resource state. For example, to
|
||||
verify that the `azurerm_public_ip` described above was created
|
||||
successfully, a test function like this is used:
|
||||
```
|
||||
go test ./...
|
||||
```
|
||||
|
||||
```go
|
||||
func testCheckAzureRMPublicIpExists(name string) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
// Ensure we have enough information in state to look up in API
|
||||
rs, ok := s.RootModule().Resources[name]
|
||||
if !ok {
|
||||
return fmt.Errorf("Not found: %s", name)
|
||||
}
|
||||
Because dependency changes affect a shared, top-level file, they are more likely than some other change types to become conflicted with other proposed changes during the code review process. For that reason, and to make dependency changes more visible in the change history, we prefer to record dependency changes as separate commits that include only the results of the above commands and the minimal set of changes to Terraform's own code for compatibility with the new version:
|
||||
|
||||
publicIPName := rs.Primary.Attributes["name"]
|
||||
resourceGroup, hasResourceGroup := rs.Primary.Attributes["resource_group_name"]
|
||||
if !hasResourceGroup {
|
||||
return fmt.Errorf("Bad: no resource group found in state for public ip: %s", name)
|
||||
}
|
||||
```
|
||||
git add go.mod go.sum
|
||||
git commit -m "go get github.com/hashicorp/hcl/v2@2.0.0"
|
||||
```
|
||||
|
||||
conn := testAccProvider.Meta().(*ArmClient).publicIPClient
|
||||
|
||||
resp, err := conn.Get(resourceGroup, publicIPName, "")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Bad: Get on publicIPClient: %s", err)
|
||||
}
|
||||
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
return fmt.Errorf("Bad: Public IP %q (resource group: %q) does not exist", name, resourceGroup)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Notice that the only information used from the Terraform state is the ID of
|
||||
the resource - though in this case it is necessary to split the ID into
|
||||
constituent parts in order to use the provider API. For computed properties,
|
||||
we instead assert that the value saved in the Terraform state was the
|
||||
expected value if possible. The testing framework provides helper functions
|
||||
for several common types of check - for example:
|
||||
|
||||
```go
|
||||
resource.TestCheckResourceAttr("azurerm_public_ip.test", "domain_name_label", "mylabel01"),
|
||||
```
|
||||
|
||||
1. The resources created by the test are destroyed. This step happens
|
||||
automatically, and is the equivalent of calling `terraform destroy`.
|
||||
|
||||
1. Assertions are made against the provider API to verify that the resources
|
||||
have indeed been removed. If these checks fail, the test fails and reports
|
||||
"dangling resources". The code to ensure that the `azurerm_public_ip` shown
|
||||
above has been destroyed looks like this:
|
||||
|
||||
```go
|
||||
func testCheckAzureRMPublicIpDestroy(s *terraform.State) error {
|
||||
conn := testAccProvider.Meta().(*ArmClient).publicIPClient
|
||||
|
||||
for _, rs := range s.RootModule().Resources {
|
||||
if rs.Type != "azurerm_public_ip" {
|
||||
continue
|
||||
}
|
||||
|
||||
name := rs.Primary.Attributes["name"]
|
||||
resourceGroup := rs.Primary.Attributes["resource_group_name"]
|
||||
|
||||
resp, err := conn.Get(resourceGroup, name, "")
|
||||
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if resp.StatusCode != http.StatusNotFound {
|
||||
return fmt.Errorf("Public IP still exists:\n%#v", resp.Properties)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
|
||||
These functions usually test only for the resource directly under test: we
|
||||
skip the check that the `azurerm_resource_group` has been destroyed when
|
||||
testing `azurerm_public_ip`, under the assumption that
|
||||
`azurerm_resource_group` is tested independently in its own acceptance
|
||||
tests.
|
||||
|
||||
[website]: https://github.com/hashicorp/terraform/tree/master/website
|
||||
[acctests]: https://github.com/hashicorp/terraform#acceptance-tests
|
||||
[ml]: https://groups.google.com/group/terraform-tool
|
||||
You can then make use of the new or updated dependency in new code added in subsequent commits.
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
name: Bug report
|
||||
about: Let us know about an unexpected error, a crash, or an incorrect behavior.
|
||||
labels: bug, new
|
||||
|
||||
---
|
||||
|
||||
|
@ -9,12 +10,22 @@ Hi there,
|
|||
|
||||
Thank you for opening an issue. Please note that we try to keep the Terraform issue tracker reserved for bug reports and feature requests. For general usage questions, please see: https://www.terraform.io/community.html.
|
||||
|
||||
If your issue relates to a specific Terraform provider, please open it in the provider's own repository. The index of providers is at https://github.com/terraform-providers .
|
||||
If your issue relates to a specific Terraform provider, please open it in the provider's own repository. The index of providers is at https://github.com/terraform-providers.
|
||||
|
||||
To fix problems, we need clear reproduction cases - we need to be able to see it happen locally. A reproduction case is ideally something a Terraform Core engineer can git-clone or copy-paste and run immediately, without inventing any details or context.
|
||||
|
||||
* A short example can be directly copy-pasteable; longer examples should be in separate git repositories, especially if multiple files are needed
|
||||
* Please include all needed context. For example, if you figured out that an expression can cause a crash, put the expression in a variable definition or a resource
|
||||
* Set defaults on (or omit) any variables. The person reproducing it should not need to invent variable settings
|
||||
* If multiple steps are required, such as running terraform twice, consider scripting it in a simple shell script. For example, see [this case](https://github.com/danieldreier/terraform-issue-reproductions/tree/master/25719). Providing a script can be easier than explaining what changes to make to the config between runs.
|
||||
* Omit any unneeded complexity: remove variables, conditional statements, functions, modules, providers, and resources that are not needed to trigger the bug
|
||||
* When possible, use the [null resource](https://www.terraform.io/docs/providers/null/resource.html) provider rather than a real provider in order to minimize external dependencies. We know this isn't always feasible. The Terraform Core team doesn't have deep domain knowledge in every provider, or access to every cloud platform for reproduction cases.
|
||||
|
||||
-->
|
||||
|
||||
### Terraform Version
|
||||
<!---
|
||||
Run `terraform -v` to show the version, and paste the result between the ``` marks below.
|
||||
Run `terraform version` to show the version, and paste the result between the ``` marks below.
|
||||
|
||||
If you are not running the latest version of Terraform, please try upgrading because your issue may have already been fixed.
|
||||
-->
|
||||
|
@ -27,10 +38,10 @@ If you are not running the latest version of Terraform, please try upgrading bec
|
|||
<!--
|
||||
Paste the relevant parts of your Terraform configuration between the ``` marks below.
|
||||
|
||||
For large Terraform configs, please use a service like Dropbox and share a link to the ZIP file. For security, you can also encrypt the files using our GPG public key.
|
||||
For Terraform configs larger than a few resources, or that involve multiple files, please make a GitHub repository that we can clone, rather than copy-pasting multiple files in here. For security, you can also encrypt the files using our GPG public key at https://www.hashicorp.com/security.
|
||||
-->
|
||||
|
||||
```hcl
|
||||
```terraform
|
||||
...
|
||||
```
|
||||
|
||||
|
|
|
@ -0,0 +1,11 @@
|
|||
blank_issues_enabled: false
|
||||
contact_links:
|
||||
- name: Provider-related Feedback and Questions
|
||||
url: https://github.com/terraform-providers
|
||||
about: Each provider (e.g. AWS, Azure, GCP, Oracle, K8S, etc.) has its own repository; any provider-related issues or questions should be directed to the appropriate provider repository.
|
||||
- name: Provider Development Feedback and Questions
|
||||
url: https://github.com/hashicorp/terraform-plugin-sdk/issues/new/choose
|
||||
about: Plugin SDK has its own repository, any SDK and provider development related issues or questions should be directed there.
|
||||
- name: Terraform Language or Workflow Questions
|
||||
url: https://discuss.hashicorp.com/c/terraform-core
|
||||
about: Please ask and answer language or workflow related questions through the Terraform Core Community Forum.
|
|
@ -1,6 +1,7 @@
|
|||
---
|
||||
name: Feature request
|
||||
about: Suggest a new feature or other enhancement.
|
||||
labels: enhancement, new
|
||||
|
||||
---
|
||||
|
||||
|
@ -14,7 +15,7 @@ If your issue relates to a specific Terraform provider, please open it in the pr
|
|||
|
||||
### Current Terraform Version
|
||||
<!---
|
||||
Run `terraform -v` to show the version, and paste the result between the ``` marks below. This will record which version was current at the time of your feature request, to help manage the request backlog.
|
||||
Run `terraform version` to show the version, and paste the result between the ``` marks below. This will record which version was current at the time of your feature request, to help manage the request backlog.
|
||||
|
||||
If you're not using the latest version, please check to see if something related to your request has already been implemented in a later version.
|
||||
-->
|
||||
|
|
|
@ -1,5 +1,4 @@
|
|||
# Support
|
||||
|
||||
Terraform is a mature project with a growing community. There are active, dedicated people willing to help you through various mediums.
|
||||
|
||||
Take a look at those mediums listed at https://www.terraform.io/community.html
|
||||
If you have questions about Terraform usage, please feel free to create a topic
|
||||
on [the official community forum](https://discuss.hashicorp.com/c/terraform-core).
|
||||
|
|
|
@ -27,5 +27,8 @@ website/node_modules
|
|||
website/vendor
|
||||
|
||||
# Test exclusions
|
||||
!command/test-fixtures/**/*.tfstate
|
||||
!command/test-fixtures/**/.terraform/
|
||||
!command/testdata/**/*.tfstate
|
||||
!command/testdata/**/.terraform/
|
||||
|
||||
# Coverage
|
||||
coverage.txt
|
||||
|
|
|
@ -1 +1 @@
|
|||
1.11.4
|
||||
1.15.2
|
||||
|
|
|
@ -0,0 +1,39 @@
|
|||
behavior "remove_labels_on_reply" "remove_stale" {
|
||||
labels = ["waiting-response", "stale"]
|
||||
only_non_maintainers = true
|
||||
}
|
||||
|
||||
poll "label_issue_migrater" "provider_migrater" {
|
||||
schedule = "0 20 * * * *"
|
||||
new_owner = env.PROVIDERS_OWNER
|
||||
repo_prefix = "terraform-provider-"
|
||||
label_prefix = "provider/"
|
||||
excluded_label_prefixes = ["backend/", "provisioner/"]
|
||||
excluded_labels = ["build", "cli", "config", "core", "new-provider", "new-provisioner", "new-remote-state", "provider/terraform"]
|
||||
aliases = {
|
||||
"provider/google-cloud" = "provider/google"
|
||||
"provider/influx" = "provider/influxdb"
|
||||
"provider/vcloud" = "provider/vcd"
|
||||
}
|
||||
issue_header = <<-EOF
|
||||
_This issue was originally opened by @${var.user} as ${var.repository}#${var.issue_number}. It was migrated here as a result of the [provider split](https://www.hashicorp.com/blog/upcoming-provider-changes-in-terraform-0-10/). The original body of the issue is below._
|
||||
|
||||
<hr>
|
||||
|
||||
EOF
|
||||
migrated_comment = "This issue has been automatically migrated to ${var.repository}#${var.issue_number} because it looks like an issue with that provider. If you believe this is _not_ an issue with the provider, please reply to ${var.repository}#${var.issue_number}."
|
||||
}
|
||||
|
||||
poll "closed_issue_locker" "locker" {
|
||||
schedule = "0 50 1 * * *"
|
||||
closed_for = "720h" # 30 days
|
||||
no_comment_if_no_activity_for = "1440h" # 60 days
|
||||
max_issues = 500
|
||||
sleep_between_issues = "5s"
|
||||
|
||||
message = <<-EOF
|
||||
I'm going to lock this issue because it has been closed for _30 days_ ⏳. This helps our maintainers find and focus on the active issues.
|
||||
|
||||
If you have found a problem that seems similar to this, please open a new issue and complete the issue template so we can capture all the details necessary to investigate further.
|
||||
EOF
|
||||
}
|
|
@ -0,0 +1,8 @@
|
|||
version_info {
|
||||
commit_var = "main.GitCommit"
|
||||
version_var = "github.com/hashicorp/terraform/version.Version"
|
||||
prerelease_var = "github.com/hashicorp/terraform/version.Prerelease"
|
||||
}
|
||||
|
||||
version_exec = false
|
||||
disable_provider_requirements = true
|
55
.travis.yml
55
.travis.yml
|
@ -1,55 +0,0 @@
|
|||
dist: trusty
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
language: go
|
||||
go:
|
||||
- "1.11.4"
|
||||
|
||||
# add TF_CONSUL_TEST=1 to run consul tests
|
||||
# they were causing timeouts in travis
|
||||
# add TF_ETCDV3_TEST=1 to run etcdv3 tests
|
||||
# if added, TF_ETCDV3_ENDPOINTS must be set to a comma-separated list of (insecure) etcd endpoints against which to test
|
||||
env:
|
||||
- CONSUL_VERSION=0.7.5 GOMAXPROCS=4 GO111MODULE=on
|
||||
|
||||
# Fetch consul for the backend and provider tests
|
||||
before_install:
|
||||
- curl -sLo consul.zip https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip
|
||||
- unzip consul.zip
|
||||
- mkdir -p ~/bin
|
||||
- mv consul ~/bin
|
||||
- export PATH="~/bin:$PATH"
|
||||
|
||||
install:
|
||||
# This script is used by the Travis build to install a cookie for
|
||||
# go.googlesource.com so rate limits are higher when using `go get` to fetch
|
||||
# packages that live there.
|
||||
# See: https://github.com/golang/go/issues/12933
|
||||
- bash scripts/gogetcookie.sh
|
||||
- make tools
|
||||
|
||||
before_script:
|
||||
- git config --global url.https://github.com/.insteadOf ssh://git@github.com/
|
||||
|
||||
script:
|
||||
- make test
|
||||
- make e2etest
|
||||
- GOOS=windows go build -mod=vendor
|
||||
# website-test is temporarily disabled while we get the website build back in shape after the v0.12 reorganization
|
||||
#- make website-test
|
||||
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- v0.11
|
||||
notifications:
|
||||
irc:
|
||||
channels:
|
||||
- irc.freenode.org#terraform-tool
|
||||
skip_join: true
|
||||
use_notice: true
|
||||
matrix:
|
||||
fast_finish: true
|
||||
allow_failures:
|
||||
- go: tip
|
|
@ -0,0 +1,85 @@
|
|||
# Terraform Core GitHub Bug Triage & Labeling
|
||||
The Terraform Core team has adopted a more structured bug triage process than we previously used. Our goal is to respond to reports of issues quickly.
|
||||
|
||||
When a bug report is filed, our goal is to either:
|
||||
1. Get it to a state where it is ready for engineering to fix it in an upcoming Terraform release, or
|
||||
2. Close it and explain why, if we can't help
|
||||
|
||||
## Process
|
||||
|
||||
### 1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering.
|
||||
|
||||
These are raw reports that need categorization and support clarifying them. They need the following done:
|
||||
|
||||
* label backends, provisioners, and providers so we can route work on codebases we don't support to the correct teams
|
||||
* point requests for help to the community forum and close the issue
|
||||
* close reports against old versions we no longer support
|
||||
* prompt users who have submitted obviously incomplete reproduction cases for additional information
|
||||
|
||||
If an issue requires discussion with the user to get it out of this initial state, leave "new" on there and label it "waiting-response" until this phase of triage is done.
|
||||
|
||||
Once this initial filtering has been done, remove the new label. If an issue subjectively looks very high-impact and likely to impact many users, assign it to the [appropriate milestone](https://github.com/hashicorp/terraform/milestones) to mark it as being urgent.
|
||||
|
||||
### 2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc)
|
||||
|
||||
A core team member initially determines whether the issue is immediately reproducible. If they cannot readily reproduce it, they label it "waiting for reproduction" and correspond with the reporter to describe what is needed. When the issue is reproduced by a core team member, they label it "confirmed".
|
||||
|
||||
"confirmed" issues should have a clear reproduction case. Anyone who picks it up should be able to reproduce it readily without having to invent any details.
|
||||
|
||||
Note that the link above excludes issues reported before May 2020; this is to avoid including issues that were reported prior to this new process being implemented. [Unreproduced issues reported before May 2020](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3C2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Areactions-%2B1-desc) will be triaged as capacity permits.
|
||||
|
||||
|
||||
### 3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)
|
||||
The next step for confirmed issues is to either:
|
||||
|
||||
* explain why the behavior is expected, label the issue as "working as designed", and close it, or
|
||||
* locate the cause of the defect in the codebase. When the defect is located, and that description is posted on the issue, the issue is labeled "explained". In many cases, this step will get skipped if the fix is obvious, and engineers will jump forward and make a PR.
|
||||
|
||||
[Confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) should generally be considered high impact
|
||||
|
||||
### 4. The last step for [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+) is to make a PR to fix them.
|
||||
|
||||
Explained issues that are expected to be fixed in a future release should be assigned to a milestone
|
||||
|
||||
## GitHub Issue Labels
|
||||
label | description
|
||||
------------------------ | -----------
|
||||
new | new issue not yet triaged
|
||||
explained | a Terraform Core team member has described the root cause of this issue in code
|
||||
waiting for reproduction | unable to reproduce issue without further information
|
||||
not reproducible | closed because a reproduction case could not be generated
|
||||
duplicate | issue closed because another issue already tracks this problem
|
||||
confirmed | a Terraform Core team member has reproduced this issue
|
||||
working as designed | confirmed as reported and closed because the behavior is intended
|
||||
pending project | issue is confirmed but will require a significant project to fix
|
||||
|
||||
## Lack of response and unreproducible issues
|
||||
When bugs that have been [labeled waiting response](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+label%3Awaiting-response+-label%3Aexplained+sort%3Aupdated-asc) or [labeled "waiting for reproduction"](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+label%3A%22waiting+for+reproduction%22+-label%3Aexplained+sort%3Aupdated-asc+) for more than 30 days, we'll use our best judgement to determine whether it's more helpful to close it or prompt the reporter again. If they again go without a response for 30 days, they can be closed with a polite message explaining why and inviting the person to submit the needed information or reproduction case in the future.
|
||||
|
||||
The intent of this process is to fix the maximum number of bugs in Terraform as quickly as possible, and having un-actionable bug reports makes it harder for Terraform Core team members and community contributors to find bugs they can actually work on.
|
||||
|
||||
## Helpful GitHub Filters
|
||||
|
||||
### Triage Process
|
||||
1. [Newly created issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Anew+label%3Abug+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3A%22waiting+for+reproduction%22+-label%3A%22waiting-response%22+-label%3Aexplained) require initial filtering.
|
||||
2. Clarify [unreproduced issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+created%3A%3E2020-05-01+-label%3Aprovisioner%2Fsalt-masterless+-label%3Adocumentation+-label%3Aprovider%2Fazuredevops+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent+-label%3Abackend%2Fmanta+-label%3Abackend%2Fatlas+-label%3Abackend%2Fetcdv3+-label%3Abackend%2Fetcdv2+-label%3Aconfirmed+-label%3A%22pending+project%22+-label%3Anew+-label%3A%22waiting+for+reproduction%22+-label%3Awaiting-response+-label%3Aexplained+sort%3Acreated-asc)
|
||||
3. Explain or fix [confirmed issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+). Prioritize [confirmed crashes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Acrash+label%3Abug+-label%3Aexplained+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+).
|
||||
4. Fix [explained issues](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aexplained+no%3Amilestone+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+label%3Aconfirmed+-label%3A%22pending+project%22+)
|
||||
|
||||
### Other Backlog
|
||||
|
||||
[Confirmed needs for documentation fixes](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Adocumentation++label%3Aconfirmed+-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2+)
|
||||
|
||||
[Confirmed bugs that will require significant projects to fix](https://github.com/hashicorp/terraform/issues?q=is%3Aopen+label%3Abug+label%3Aconfirmed+label%3A%22pending+project%22++-label%3Abackend%2Foss+-label%3Abackend%2Fazure+-label%3Abackend%2Fs3+-label%3Abackend%2Fgcs+-label%3Abackend%2Fconsul+-label%3Abackend%2Fartifactory+-label%3Aterraform-cloud+-label%3Abackend%2Fremote+-label%3Abackend%2Fswift+-label%3Abackend%2Fpg+-label%3Abackend%2Ftencent++-label%3Abackend%2Fmanta++-label%3Abackend%2Fatlas++-label%3Abackend%2Fetcdv3++-label%3Abackend%2Fetcdv2)
|
||||
|
||||
### Milestone Use
|
||||
|
||||
Milestones ending in .x indicate issues assigned to that milestone are intended to be fixed during that release lifecycle. Milestones ending in .0 indicate issues that will be fixed in that major release. For example:
|
||||
|
||||
[0.13.x Milestone](https://github.com/hashicorp/terraform/milestone/17). Issues in this milestone should be considered high-priority but do not block a patch release. All issues in this milestone should be resolved in a 0.13.x release before the 0.14.0 RC1 ships.
|
||||
|
||||
[0.14.0 Milestone](https://github.com/hashicorp/terraform/milestone/18). All issues in this milestone must be fixed before 0.14.0 RC1 ships, and should ideally be fixed before 0.14.0 beta 1 ships.
|
||||
|
||||
[0.14.x Milestone](https://github.com/hashicorp/terraform/milestone/20). Issues in this milestone are expected to be addressed at some point in the 0.14.x lifecycle, before 0.15.0. All issues in this milestone should be resolved in a 0.14.x release before the 0.15.0 RC1 ships.
|
||||
|
||||
[0.15.0 Milestone](https://github.com/hashicorp/terraform/milestone/19). All issues in this milestone must be fixed before 0.15.0 RC1 ships, and should ideally be fixed before 0.15.0 beta 1 ships.
|
56
BUILDING.md
56
BUILDING.md
|
@ -1,56 +0,0 @@
|
|||
# Building Terraform
|
||||
|
||||
This document contains details about the process for building binaries for
|
||||
Terraform.
|
||||
|
||||
## Versioning
|
||||
|
||||
As a pre-1.0 project, we use the MINOR and PATCH versions as follows:
|
||||
|
||||
* a `MINOR` version increment indicates a release that may contain backwards
|
||||
incompatible changes
|
||||
* a `PATCH` version increment indicates a release that may contain bugfixes as
|
||||
well as additive (backwards compatible) features and enhancements
|
||||
|
||||
## Process
|
||||
|
||||
If you only need to build binaries for the platform you're running (Windows, Linux,
|
||||
Mac OS X etc..), you can follow the instructions in the README for [Developing
|
||||
Terraform][1].
|
||||
|
||||
The guide below outlines the steps HashiCorp takes to build the official release
|
||||
binaries for Terraform. This process will generate a set of binaries for each supported
|
||||
platform, using the [gox](https://github.com/mitchellh/gox) tool.
|
||||
|
||||
A Vagrant virtual machine is used to provide a consistent environment with
|
||||
the pre-requisite tools in place. The specifics of this VM are defined in the
|
||||
[Vagrantfile](Vagrantfile).
|
||||
|
||||
|
||||
```sh
|
||||
# clone the repository if needed
|
||||
git clone https://github.com/hashicorp/terraform.git
|
||||
cd terraform
|
||||
|
||||
# Spin up a fresh build VM
|
||||
vagrant destroy -f
|
||||
vagrant up
|
||||
vagrant ssh
|
||||
|
||||
# The Vagrantfile installs Go and configures the $GOPATH at /opt/gopath
|
||||
# The current "terraform" directory is then sync'd into the gopath
|
||||
cd /opt/gopath/src/github.com/hashicorp/terraform/
|
||||
|
||||
# Verify unit tests pass
|
||||
make test
|
||||
|
||||
# Build the release
|
||||
# This generates binaries for each platform and places them in the pkg folder
|
||||
make bin
|
||||
```
|
||||
|
||||
After running these commands, you should have binaries for all supported
|
||||
platforms in the `pkg` folder.
|
||||
|
||||
|
||||
[1]: https://github.com/hashicorp/terraform#developing-terraform
|
1745
CHANGELOG.md
1745
CHANGELOG.md
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,28 @@
|
|||
# Each line is a file pattern followed by one or more owners.
|
||||
# More on CODEOWNERS files: https://help.github.com/en/github/creating-cloning-and-archiving-repositories/about-code-owners
|
||||
|
||||
# Remote-state backend # Maintainer
|
||||
/backend/remote-state/artifactory Unmaintained
|
||||
/backend/remote-state/azure @hashicorp/terraform-azure
|
||||
/backend/remote-state/consul @hashicorp/consul @remilapeyre
|
||||
/backend/remote-state/cos @likexian
|
||||
/backend/remote-state/etcdv2 Unmaintained
|
||||
/backend/remote-state/etcdv3 @bmcustodio
|
||||
/backend/remote-state/gcs @hashicorp/terraform-google
|
||||
/backend/remote-state/http @hashicorp/terraform-core
|
||||
/backend/remote-state/manta Unmaintained
|
||||
/backend/remote-state/oss @xiaozhu36
|
||||
/backend/remote-state/pg @remilapeyre
|
||||
/backend/remote-state/s3 @hashicorp/terraform-aws
|
||||
/backend/remote-state/swift Unmaintained
|
||||
/backend/remote-state/kubernetes @jrhouston @alexsomesan
|
||||
|
||||
# Provisioners
|
||||
builtin/provisioners/chef Deprecated
|
||||
builtin/provisioners/file @hashicorp/terraform-core
|
||||
builtin/provisioners/habitat Deprecated
|
||||
builtin/provisioners/local-exec @hashicorp/terraform-core
|
||||
builtin/provisioners/puppet Deprecated
|
||||
builtin/provisioners/remote-exec @hashicorp/terraform-core
|
||||
builtin/provisioners/salt-masterless Deprecated
|
||||
|
|
@ -11,14 +11,14 @@
|
|||
FROM golang:alpine
|
||||
LABEL maintainer="HashiCorp Terraform Team <terraform@hashicorp.com>"
|
||||
|
||||
RUN apk add --update git bash openssh
|
||||
RUN apk add --no-cache git bash openssh
|
||||
|
||||
ENV TF_DEV=true
|
||||
ENV TF_RELEASE=1
|
||||
|
||||
WORKDIR $GOPATH/src/github.com/hashicorp/terraform
|
||||
COPY . .
|
||||
RUN /bin/bash scripts/build.sh
|
||||
RUN /bin/bash ./scripts/build.sh
|
||||
|
||||
WORKDIR $GOPATH
|
||||
ENTRYPOINT ["terraform"]
|
||||
|
|
89
Makefile
89
Makefile
|
@ -1,85 +1,16 @@
|
|||
VERSION?="0.3.32"
|
||||
TEST?=./...
|
||||
GOFMT_FILES?=$$(find . -name '*.go' | grep -v vendor)
|
||||
WEBSITE_REPO=github.com/hashicorp/terraform-website
|
||||
|
||||
default: test
|
||||
|
||||
tools:
|
||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer
|
||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/cover
|
||||
GO111MODULE=off go get -u github.com/golang/mock/mockgen
|
||||
|
||||
# bin generates the releaseable binaries for Terraform
|
||||
bin: fmtcheck generate
|
||||
@TF_RELEASE=1 sh -c "'$(CURDIR)/scripts/build.sh'"
|
||||
|
||||
# dev creates binaries for testing Terraform locally. These are put
|
||||
# into ./bin/ as well as $GOPATH/bin
|
||||
dev: fmtcheck generate
|
||||
go install -mod=vendor .
|
||||
|
||||
quickdev: generate
|
||||
go install -mod=vendor .
|
||||
|
||||
# Shorthand for building and installing just one plugin for local testing.
|
||||
# Run as (for example): make plugin-dev PLUGIN=provider-aws
|
||||
plugin-dev: generate
|
||||
go install github.com/hashicorp/terraform/builtin/bins/$(PLUGIN)
|
||||
mv $(GOPATH)/bin/$(PLUGIN) $(GOPATH)/bin/terraform-$(PLUGIN)
|
||||
|
||||
# test runs the unit tests
|
||||
# we run this one package at a time here because running the entire suite in
|
||||
# one command creates memory usage issues when running in Travis-CI.
|
||||
test: fmtcheck generate
|
||||
go list -mod=vendor $(TEST) | xargs -t -n4 go test $(TESTARGS) -mod=vendor -timeout=2m -parallel=4
|
||||
|
||||
# testacc runs acceptance tests
|
||||
testacc: fmtcheck generate
|
||||
@if [ "$(TEST)" = "./..." ]; then \
|
||||
echo "ERROR: Set TEST to a specific package. For example,"; \
|
||||
echo " make testacc TEST=./builtin/providers/test"; \
|
||||
exit 1; \
|
||||
fi
|
||||
TF_ACC=1 go test $(TEST) -v $(TESTARGS) -mod=vendor -timeout 120m
|
||||
|
||||
# e2etest runs the end-to-end tests against a generated Terraform binary
|
||||
# The TF_ACC here allows network access, but does not require any special
|
||||
# credentials since the e2etests use local-only providers such as "null".
|
||||
e2etest: generate
|
||||
TF_ACC=1 go test -mod=vendor -v ./command/e2etest
|
||||
|
||||
test-compile: fmtcheck generate
|
||||
@if [ "$(TEST)" = "./..." ]; then \
|
||||
echo "ERROR: Set TEST to a specific package. For example,"; \
|
||||
echo " make test-compile TEST=./builtin/providers/test"; \
|
||||
exit 1; \
|
||||
fi
|
||||
go test -c $(TEST) $(TESTARGS)
|
||||
|
||||
# testrace runs the race checker
|
||||
testrace: fmtcheck generate
|
||||
TF_ACC= go test -mod=vendor -race $(TEST) $(TESTARGS)
|
||||
|
||||
cover:
|
||||
@go tool cover 2>/dev/null; if [ $$? -eq 3 ]; then \
|
||||
go get -u golang.org/x/tools/cmd/cover; \
|
||||
fi
|
||||
go test $(TEST) -coverprofile=coverage.out
|
||||
go tool cover -html=coverage.out
|
||||
rm coverage.out
|
||||
VERSION?="0.3.44"
|
||||
|
||||
# generate runs `go generate` to build the dynamically generated
|
||||
# source files, except the protobuf stubs which are built instead with
|
||||
# "make protobuf".
|
||||
generate:
|
||||
@which stringer > /dev/null; if [ $$? -ne 0 ]; then \
|
||||
GO111MODULE=off go get -u golang.org/x/tools/cmd/stringer; \
|
||||
fi
|
||||
# We turn off modules for "go generate" because our downstream generate
|
||||
# commands are not all ready to deal with Go modules yet, and this
|
||||
# avoids downloading all of the deps that are in the vendor dir anyway.
|
||||
GO111MODULE=off go generate ./...
|
||||
go generate ./...
|
||||
# go fmt doesn't support -mod=vendor but it still wants to populate the
|
||||
# module cache with everything in go.mod even though formatting requires
|
||||
# no dependencies, and so we're disabling modules mode for this right
|
||||
# now until the "go fmt" behavior is rationalized to either support the
|
||||
# -mod= argument or _not_ try to install things.
|
||||
GO111MODULE=off go fmt command/internal_plugin_list.go > /dev/null
|
||||
|
||||
# We separate the protobuf generation because most development tasks on
|
||||
|
@ -89,12 +20,10 @@ generate:
|
|||
# If you are working on changes to protobuf interfaces you may either use
|
||||
# this target or run the individual scripts below directly.
|
||||
protobuf:
|
||||
bash scripts/protobuf-check.sh
|
||||
bash internal/tfplugin5/generate.sh
|
||||
bash plans/internal/planproto/generate.sh
|
||||
|
||||
fmt:
|
||||
gofmt -w $(GOFMT_FILES)
|
||||
|
||||
fmtcheck:
|
||||
@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'"
|
||||
|
||||
|
@ -147,4 +76,4 @@ endif
|
|||
# under parallel conditions.
|
||||
.NOTPARALLEL:
|
||||
|
||||
.PHONY: bin cover default dev e2etest fmt fmtcheck generate protobuf plugin-dev quickdev test-compile test testacc testrace tools vendor-status website website-test
|
||||
.PHONY: fmtcheck generate protobuf website website-test
|
||||
|
|
148
README.md
148
README.md
|
@ -2,10 +2,12 @@ Terraform
|
|||
=========
|
||||
|
||||
- Website: https://www.terraform.io
|
||||
- [![Gitter chat](https://badges.gitter.im/hashicorp-terraform/Lobby.png)](https://gitter.im/hashicorp-terraform/Lobby)
|
||||
- Mailing list: [Google Groups](http://groups.google.com/group/terraform-tool)
|
||||
- Forums: [HashiCorp Discuss](https://discuss.hashicorp.com/c/terraform-core)
|
||||
- Documentation: [https://www.terraform.io/docs/](https://www.terraform.io/docs/)
|
||||
- Tutorials: [HashiCorp's Learn Platform](https://learn.hashicorp.com/terraform)
|
||||
- Certification Exam: [HashiCorp Certified: Terraform Associate](https://www.hashicorp.com/certification/#hashicorp-certified-terraform-associate)
|
||||
|
||||
<img alt="Terraform" src="https://cdn.rawgit.com/hashicorp/terraform-website/master/content/source/assets/images/logo-hashicorp.svg" width="600px">
|
||||
<img alt="Terraform" src="https://www.terraform.io/assets/images/logo-hashicorp-3f10732f.svg" width="600px">
|
||||
|
||||
Terraform is a tool for building, changing, and versioning infrastructure safely and efficiently. Terraform can manage existing and popular service providers as well as custom in-house solutions.
|
||||
|
||||
|
@ -23,146 +25,22 @@ For more information, see the [introduction section](http://www.terraform.io/int
|
|||
|
||||
Getting Started & Documentation
|
||||
-------------------------------
|
||||
|
||||
If you're new to Terraform and want to get started creating infrastructure, please checkout our [Getting Started](https://www.terraform.io/intro/getting-started/install.html) guide, available on the [Terraform website](http://www.terraform.io).
|
||||
|
||||
All documentation is available on the [Terraform website](http://www.terraform.io):
|
||||
|
||||
Documentation is available on the [Terraform website](http://www.terraform.io):
|
||||
- [Intro](https://www.terraform.io/intro/index.html)
|
||||
- [Docs](https://www.terraform.io/docs/index.html)
|
||||
|
||||
If you're new to Terraform and want to get started creating infrastructure, please check out our [Getting Started guides](https://learn.hashicorp.com/terraform#getting-started) on HashiCorp's learning platform. There are also [additional guides](https://learn.hashicorp.com/terraform#operations-and-development) to continue your learning.
|
||||
|
||||
Show off your Terraform knowledge by passing a certification exam. Visit the [certification page](https://www.hashicorp.com/certification/) for information about exams and find [study materials](https://learn.hashicorp.com/terraform/certification/terraform-associate) on HashiCorp's learning platform.
|
||||
|
||||
Developing Terraform
|
||||
--------------------
|
||||
|
||||
If you wish to work on Terraform itself or any of its built-in providers, you'll first need [Go](http://www.golang.org) installed on your machine (version 1.11+ is *required*). Alternatively, you can use the Vagrantfile in the root of this repo to stand up a virtual machine with the appropriate dev tooling already set up for you.
|
||||
|
||||
This repository contains only Terraform core, which includes the command line interface and the main graph engine. Providers are implemented as plugins that each have their own repository in [the `terraform-providers` organization](https://github.com/terraform-providers) on GitHub. Instructions for developing each provider are in the associated README file. For more information, see [the provider development overview](https://www.terraform.io/docs/plugins/provider.html).
|
||||
|
||||
For local development of Terraform core, first make sure Go is properly installed and that a
|
||||
[GOPATH](http://golang.org/doc/code.html#GOPATH) has been set. You will also need to add `$GOPATH/bin` to your `$PATH`.
|
||||
|
||||
Next, using [Git](https://git-scm.com/), clone this repository into `$GOPATH/src/github.com/hashicorp/terraform`.
|
||||
|
||||
You'll need to run `make tools` to install some required tools, then `make`. This will compile the code and then run the tests. If this exits with exit status 0, then everything is working!
|
||||
You only need to run `make tools` once (or when the tools change).
|
||||
|
||||
```sh
|
||||
$ cd "$GOPATH/src/github.com/hashicorp/terraform"
|
||||
$ make tools
|
||||
$ make
|
||||
```
|
||||
|
||||
To compile a development version of Terraform and the built-in plugins, run `make dev`. This will build everything using [gox](https://github.com/mitchellh/gox) and put Terraform binaries in the `bin` and `$GOPATH/bin` folders:
|
||||
|
||||
```sh
|
||||
$ make dev
|
||||
...
|
||||
$ bin/terraform
|
||||
...
|
||||
```
|
||||
|
||||
If you're developing a specific package, you can run tests for just that package by specifying the `TEST` variable. For example below, only `terraform` package tests will be run.
|
||||
|
||||
```sh
|
||||
$ make test TEST=./terraform
|
||||
...
|
||||
```
|
||||
|
||||
If you're working on a specific provider which has not been separated into an individual repository and only wish to rebuild that provider, you can use the `plugin-dev` target. For example, to build only the Test provider:
|
||||
|
||||
```sh
|
||||
$ make plugin-dev PLUGIN=provider-test
|
||||
```
|
||||
|
||||
### Dependencies
|
||||
|
||||
Terraform uses Go Modules for dependency management, but for the moment is
|
||||
continuing to use Go 1.6-style vendoring for compatibility with tools that
|
||||
have not yet been updated for full Go Modules support.
|
||||
|
||||
If you're developing Terraform, there are a few tasks you might need to perform.
|
||||
|
||||
#### Adding a dependency
|
||||
|
||||
If you're adding a dependency, you'll need to vendor it in the same Pull Request as the code that depends on it. You should do this in a separate commit from your code, as it makes PR review easier and Git history simpler to read in the future.
|
||||
|
||||
To add a dependency:
|
||||
|
||||
Assuming your work is on a branch called `my-feature-branch`, the steps look like this:
|
||||
|
||||
1. Add an `import` statement to a suitable package in the Terraform code.
|
||||
|
||||
2. Run `go mod vendor` to download the latest version of the module containing
|
||||
the imported package into the `vendor/` directory, and update the `go.mod`
|
||||
and `go.sum` files.
|
||||
|
||||
3. Review the changes in git and commit them.
|
||||
|
||||
#### Updating a dependency
|
||||
|
||||
To update a dependency:
|
||||
|
||||
1. Run `go get -u module-path@version-number`, such as `go get -u github.com/hashicorp/hcl@2.0.0`
|
||||
|
||||
2. Run `go mod vendor` to update the vendored copy in the `vendor/` directory.
|
||||
|
||||
3. Review the changes in git and commit them.
|
||||
|
||||
### Acceptance Tests
|
||||
|
||||
Terraform has a comprehensive [acceptance
|
||||
test](http://en.wikipedia.org/wiki/Acceptance_testing) suite covering the
|
||||
built-in providers. Our [Contributing Guide](https://github.com/hashicorp/terraform/blob/master/.github/CONTRIBUTING.md) includes details about how and when to write and run acceptance tests in order to help contributions get accepted quickly.
|
||||
|
||||
|
||||
### Cross Compilation and Building for Distribution
|
||||
|
||||
If you wish to cross-compile Terraform for another architecture, you can set the `XC_OS` and `XC_ARCH` environment variables to values representing the target operating system and architecture before calling `make`. The output is placed in the `pkg` subdirectory tree both expanded in a directory representing the OS/architecture combination and as a ZIP archive.
|
||||
|
||||
For example, to compile 64-bit Linux binaries on Mac OS X, you can run:
|
||||
|
||||
```sh
|
||||
$ XC_OS=linux XC_ARCH=amd64 make bin
|
||||
...
|
||||
$ file pkg/linux_amd64/terraform
|
||||
terraform: ELF 64-bit LSB executable, x86-64, version 1 (SYSV), statically linked, not stripped
|
||||
```
|
||||
|
||||
`XC_OS` and `XC_ARCH` can be space separated lists representing different combinations of operating system and architecture. For example, to compile for both Linux and Mac OS X, targeting both 32- and 64-bit architectures, you can run:
|
||||
|
||||
```sh
|
||||
$ XC_OS="linux darwin" XC_ARCH="386 amd64" make bin
|
||||
...
|
||||
$ tree ./pkg/ -P "terraform|*.zip"
|
||||
./pkg/
|
||||
├── darwin_386
|
||||
│ └── terraform
|
||||
├── darwin_386.zip
|
||||
├── darwin_amd64
|
||||
│ └── terraform
|
||||
├── darwin_amd64.zip
|
||||
├── linux_386
|
||||
│ └── terraform
|
||||
├── linux_386.zip
|
||||
├── linux_amd64
|
||||
│ └── terraform
|
||||
└── linux_amd64.zip
|
||||
|
||||
4 directories, 8 files
|
||||
```
|
||||
|
||||
_Note: Cross-compilation uses [gox](https://github.com/mitchellh/gox), which requires toolchains to be built with versions of Go prior to 1.5. In order to successfully cross-compile with older versions of Go, you will need to run `gox -build-toolchain` before running the commands detailed above._
|
||||
|
||||
#### Docker
|
||||
|
||||
When using docker you don't need to have any of the Go development tools installed and you can clone terraform to any location on disk (doesn't have to be in your $GOPATH). This is useful for users who want to build `master` or a specific branch for testing without setting up a proper Go environment.
|
||||
|
||||
For example, run the following command to build terraform in a linux-based container for macOS.
|
||||
|
||||
```sh
|
||||
docker run --rm -v $(pwd):/go/src/github.com/hashicorp/terraform -w /go/src/github.com/hashicorp/terraform -e XC_OS=darwin -e XC_ARCH=amd64 golang:latest bash -c "apt-get update && apt-get install -y zip && make bin"
|
||||
```
|
||||
To learn more about compiling Terraform and contributing suggested changes, please refer to [the contributing guide](.github/CONTRIBUTING.md).
|
||||
|
||||
To learn more about how we handle bug reports, please read the [bug triage guide](./BUGPROCESS.md).
|
||||
|
||||
## License
|
||||
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform.svg?type=large)](https://app.fossa.io/projects/git%2Bhttps%3A%2F%2Fgithub.com%2Fhashicorp%2Fterraform?ref=badge_large)
|
||||
[Mozilla Public License v2.0](https://github.com/hashicorp/terraform/blob/master/LICENSE)
|
||||
|
|
|
@ -1,92 +0,0 @@
|
|||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
|
||||
VAGRANTFILE_API_VERSION = "2"
|
||||
|
||||
# Software version variables
|
||||
GOVERSION = "1.11.4"
|
||||
UBUNTUVERSION = "16.04"
|
||||
|
||||
# CPU and RAM can be adjusted depending on your system
|
||||
CPUCOUNT = "2"
|
||||
RAM = "4096"
|
||||
|
||||
$script = <<SCRIPT
|
||||
GOVERSION="#{GOVERSION}"
|
||||
SRCROOT="/opt/go"
|
||||
SRCPATH="/opt/gopath"
|
||||
|
||||
# Get the ARCH
|
||||
ARCH="$(uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|')"
|
||||
|
||||
# Install Prereq Packages
|
||||
export DEBIAN_PRIORITY=critical
|
||||
export DEBIAN_FRONTEND=noninteractive
|
||||
export DEBCONF_NONINTERACTIVE_SEEN=true
|
||||
APT_OPTS="--assume-yes --no-install-suggests --no-install-recommends -o Dpkg::Options::=\"--force-confdef\" -o Dpkg::Options::=\"--force-confold\""
|
||||
echo "Upgrading packages ..."
|
||||
apt-get update ${APT_OPTS} >/dev/null
|
||||
apt-get upgrade ${APT_OPTS} >/dev/null
|
||||
echo "Installing prerequisites ..."
|
||||
apt-get install ${APT_OPTS} build-essential curl git-core libpcre3-dev mercurial pkg-config zip >/dev/null
|
||||
|
||||
# Install Go
|
||||
echo "Downloading go (${GOVERSION}) ..."
|
||||
wget -P /tmp --quiet "https://storage.googleapis.com/golang/go${GOVERSION}.linux-${ARCH}.tar.gz"
|
||||
echo "Setting up go (${GOVERSION}) ..."
|
||||
tar -C /opt -xf "/tmp/go${GOVERSION}.linux-${ARCH}.tar.gz"
|
||||
chmod 775 "$SRCROOT"
|
||||
chown vagrant:vagrant "$SRCROOT"
|
||||
|
||||
# Setup the GOPATH; even though the shared folder spec gives the working
|
||||
# directory the right user/group, we need to set it properly on the
|
||||
# parent path to allow subsequent "go get" commands to work.
|
||||
mkdir -p "$SRCPATH"
|
||||
chown -R vagrant:vagrant "$SRCPATH" 2>/dev/null || true
|
||||
# ^^ silencing errors here because we expect this to fail for the shared folder
|
||||
|
||||
cat >/etc/profile.d/gopath.sh <<EOF
|
||||
export GOPATH="$SRCPATH"
|
||||
export GOROOT="$SRCROOT"
|
||||
export PATH="$SRCROOT/bin:$SRCPATH/bin:\$PATH"
|
||||
EOF
|
||||
chmod 755 /etc/profile.d/gopath.sh
|
||||
|
||||
grep -q -F 'cd /opt/gopath/src/github.com/hashicorp/terraform' /home/vagrant/.bashrc || cat >>/home/vagrant/.bashrc <<EOF
|
||||
|
||||
## After login, change to terraform directory
|
||||
cd /opt/gopath/src/github.com/hashicorp/terraform
|
||||
EOF
|
||||
|
||||
SCRIPT
|
||||
|
||||
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
|
||||
config.vm.box = "bento/ubuntu-#{UBUNTUVERSION}"
|
||||
config.vm.hostname = "terraform"
|
||||
|
||||
config.vm.provision "prepare-shell", type: "shell", inline: "sudo sed -i '/tty/!s/mesg n/tty -s \\&\\& mesg n/' /root/.profile", privileged: false
|
||||
config.vm.provision "initial-setup", type: "shell", inline: $script
|
||||
config.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/terraform'
|
||||
|
||||
config.vm.provider "docker" do |v, override|
|
||||
override.vm.box = "tknerr/baseimage-ubuntu-#{UBUNTUVERSION}"
|
||||
end
|
||||
|
||||
["vmware_fusion", "vmware_workstation"].each do |p|
|
||||
config.vm.provider p do |v|
|
||||
v.vmx["memsize"] = "#{RAM}"
|
||||
v.vmx["numvcpus"] = "#{CPUCOUNT}"
|
||||
end
|
||||
end
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.memory = "#{RAM}"
|
||||
v.cpus = "#{CPUCOUNT}"
|
||||
end
|
||||
|
||||
config.vm.provider "parallels" do |prl|
|
||||
prl.memory = "#{RAM}"
|
||||
prl.cpus = "#{CPUCOUNT}"
|
||||
end
|
||||
end
|
|
@ -0,0 +1,12 @@
|
|||
package addrs
|
||||
|
||||
// ForEachAttr is the address of an attribute referencing the current "for_each" object in
|
||||
// the interpolation scope, addressed using the "each" keyword, ex. "each.key" and "each.value"
|
||||
type ForEachAttr struct {
|
||||
referenceable
|
||||
Name string
|
||||
}
|
||||
|
||||
func (f ForEachAttr) String() string {
|
||||
return "each." + f.Name
|
||||
}
|
|
@ -14,6 +14,15 @@ func (v InputVariable) String() string {
|
|||
return "var." + v.Name
|
||||
}
|
||||
|
||||
// Absolute converts the receiver into an absolute address within the given
|
||||
// module instance.
|
||||
func (v InputVariable) Absolute(m ModuleInstance) AbsInputVariableInstance {
|
||||
return AbsInputVariableInstance{
|
||||
Module: m,
|
||||
Variable: v,
|
||||
}
|
||||
}
|
||||
|
||||
// AbsInputVariableInstance is the address of an input variable within a
|
||||
// particular module instance.
|
||||
type AbsInputVariableInstance struct {
|
||||
|
@ -34,7 +43,7 @@ func (m ModuleInstance) InputVariable(name string) AbsInputVariableInstance {
|
|||
|
||||
func (v AbsInputVariableInstance) String() string {
|
||||
if len(v.Module) == 0 {
|
||||
return v.String()
|
||||
return v.Variable.String()
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s.%s", v.Module.String(), v.Variable.String())
|
||||
|
|
|
@ -18,6 +18,10 @@ import (
|
|||
type InstanceKey interface {
|
||||
instanceKeySigil()
|
||||
String() string
|
||||
|
||||
// Value returns the cty.Value of the appropriate type for the InstanceKey
|
||||
// value.
|
||||
Value() cty.Value
|
||||
}
|
||||
|
||||
// ParseInstanceKey returns the instance key corresponding to the given value,
|
||||
|
@ -56,6 +60,10 @@ func (k IntKey) String() string {
|
|||
return fmt.Sprintf("[%d]", int(k))
|
||||
}
|
||||
|
||||
func (k IntKey) Value() cty.Value {
|
||||
return cty.NumberIntVal(int64(k))
|
||||
}
|
||||
|
||||
// StringKey is the InstanceKey representation representing string indices, as
|
||||
// used when the "for_each" argument is specified with a map or object type.
|
||||
type StringKey string
|
||||
|
@ -69,6 +77,10 @@ func (k StringKey) String() string {
|
|||
return fmt.Sprintf("[%q]", string(k))
|
||||
}
|
||||
|
||||
func (k StringKey) Value() cty.Value {
|
||||
return cty.StringVal(string(k))
|
||||
}
|
||||
|
||||
// InstanceKeyLess returns true if the first given instance key i should sort
|
||||
// before the second key j, and false otherwise.
|
||||
func InstanceKeyLess(i, j InstanceKey) bool {
|
||||
|
|
|
@ -33,7 +33,58 @@ func (m Module) String() string {
|
|||
if len(m) == 0 {
|
||||
return ""
|
||||
}
|
||||
return strings.Join([]string(m), ".")
|
||||
var steps []string
|
||||
for _, s := range m {
|
||||
steps = append(steps, "module", s)
|
||||
}
|
||||
return strings.Join(steps, ".")
|
||||
}
|
||||
|
||||
func (m Module) Equal(other Module) bool {
|
||||
return m.String() == other.String()
|
||||
}
|
||||
|
||||
func (m Module) targetableSigil() {
|
||||
// Module is targetable
|
||||
}
|
||||
|
||||
// TargetContains implements Targetable for Module by returning true if the given other
|
||||
// address either matches the receiver, is a sub-module-instance of the
|
||||
// receiver, or is a targetable absolute address within a module that
|
||||
// is contained within the receiver.
|
||||
func (m Module) TargetContains(other Targetable) bool {
|
||||
switch to := other.(type) {
|
||||
|
||||
case Module:
|
||||
if len(to) < len(m) {
|
||||
// Can't be contained if the path is shorter
|
||||
return false
|
||||
}
|
||||
// Other is contained if its steps match for the length of our own path.
|
||||
for i, ourStep := range m {
|
||||
otherStep := to[i]
|
||||
if ourStep != otherStep {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// If we fall out here then the prefix matched, so it's contained.
|
||||
return true
|
||||
|
||||
case ModuleInstance:
|
||||
return m.TargetContains(to.Module())
|
||||
|
||||
case ConfigResource:
|
||||
return m.TargetContains(to.Module)
|
||||
|
||||
case AbsResource:
|
||||
return m.TargetContains(to.Module)
|
||||
|
||||
case AbsResourceInstance:
|
||||
return m.TargetContains(to.Module)
|
||||
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// Child returns the address of a child call in the receiver, identified by the
|
||||
|
@ -73,3 +124,17 @@ func (m Module) Call() (Module, ModuleCall) {
|
|||
Name: callName,
|
||||
}
|
||||
}
|
||||
|
||||
// Ancestors returns a slice containing the receiver and all of its ancestor
|
||||
// modules, all the way up to (and including) the root module. The result is
|
||||
// ordered by depth, with the root module always first.
|
||||
//
|
||||
// Since the result always includes the root module, a caller may choose to
|
||||
// ignore it by slicing the result with [1:].
|
||||
func (m Module) Ancestors() []Module {
|
||||
ret := make([]Module, 0, len(m)+1)
|
||||
for i := 0; i <= len(m); i++ {
|
||||
ret = append(ret, m[:i])
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
|
|
@ -51,31 +51,52 @@ func (c ModuleCallInstance) ModuleInstance(caller ModuleInstance) ModuleInstance
|
|||
return caller.Child(c.Call.Name, c.Key)
|
||||
}
|
||||
|
||||
// Output returns the address of an output of the receiver identified by its
|
||||
// Output returns the absolute address of an output of the receiver identified by its
|
||||
// name.
|
||||
func (c ModuleCallInstance) Output(name string) ModuleCallOutput {
|
||||
return ModuleCallOutput{
|
||||
func (c ModuleCallInstance) Output(name string) AbsModuleCallOutput {
|
||||
return AbsModuleCallOutput{
|
||||
Call: c,
|
||||
Name: name,
|
||||
}
|
||||
}
|
||||
|
||||
// ModuleCallOutput is the address of a particular named output produced by
|
||||
// an instance of a module call.
|
||||
// ModuleCallOutput is the address of a named output and its associated
|
||||
// ModuleCall, which may expand into multiple module instances
|
||||
type ModuleCallOutput struct {
|
||||
referenceable
|
||||
Call ModuleCall
|
||||
Name string
|
||||
}
|
||||
|
||||
func (m ModuleCallOutput) String() string {
|
||||
return fmt.Sprintf("%s.%s", m.Call.String(), m.Name)
|
||||
}
|
||||
|
||||
// AbsModuleCallOutput is the address of a particular named output produced by
|
||||
// an instance of a module call.
|
||||
type AbsModuleCallOutput struct {
|
||||
referenceable
|
||||
Call ModuleCallInstance
|
||||
Name string
|
||||
}
|
||||
|
||||
func (co ModuleCallOutput) String() string {
|
||||
// ModuleCallOutput returns the referenceable ModuleCallOutput for this
|
||||
// particular instance.
|
||||
func (co AbsModuleCallOutput) ModuleCallOutput() ModuleCallOutput {
|
||||
return ModuleCallOutput{
|
||||
Call: co.Call.Call,
|
||||
Name: co.Name,
|
||||
}
|
||||
}
|
||||
|
||||
func (co AbsModuleCallOutput) String() string {
|
||||
return fmt.Sprintf("%s.%s", co.Call.String(), co.Name)
|
||||
}
|
||||
|
||||
// AbsOutputValue returns the absolute output value address that corresponds
|
||||
// to the receving module call output address, once resolved in the given
|
||||
// calling module.
|
||||
func (co ModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
|
||||
func (co AbsModuleCallOutput) AbsOutputValue(caller ModuleInstance) AbsOutputValue {
|
||||
moduleAddr := co.Call.ModuleInstance(caller)
|
||||
return moduleAddr.OutputValue(co.Name)
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
|
|||
"bytes"
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
"github.com/zclconf/go-cty/cty/gocty"
|
||||
|
||||
|
@ -57,7 +57,7 @@ func ParseModuleInstance(traversal hcl.Traversal) (ModuleInstance, tfdiags.Diagn
|
|||
// If a reference string is coming from a source that should be identified in
|
||||
// error messages then the caller should instead parse it directly using a
|
||||
// suitable function from the HCL API and pass the traversal itself to
|
||||
// ParseProviderConfigCompact.
|
||||
// ParseModuleInstance.
|
||||
//
|
||||
// Error diagnostics are returned if either the parsing fails or the analysis
|
||||
// of the traversal fails. There is no way for the caller to distinguish the
|
||||
|
@ -383,8 +383,7 @@ func (m ModuleInstance) CallInstance() (ModuleInstance, ModuleCallInstance) {
|
|||
// is contained within the receiver.
|
||||
func (m ModuleInstance) TargetContains(other Targetable) bool {
|
||||
switch to := other.(type) {
|
||||
|
||||
case ModuleInstance:
|
||||
case Module:
|
||||
if len(to) < len(m) {
|
||||
// Can't be contained if the path is shorter
|
||||
return false
|
||||
|
@ -392,13 +391,55 @@ func (m ModuleInstance) TargetContains(other Targetable) bool {
|
|||
// Other is contained if its steps match for the length of our own path.
|
||||
for i, ourStep := range m {
|
||||
otherStep := to[i]
|
||||
if ourStep != otherStep {
|
||||
|
||||
// We can't contain an entire module if we have a specific instance
|
||||
// key. The case of NoKey is OK because this address is either
|
||||
// meant to address an unexpanded module, or a single instance of
|
||||
// that module, and both of those are a covered in-full by the
|
||||
// Module address.
|
||||
if ourStep.InstanceKey != NoKey {
|
||||
return false
|
||||
}
|
||||
|
||||
if ourStep.Name != otherStep {
|
||||
return false
|
||||
}
|
||||
}
|
||||
// If we fall out here then the prefix matched, so it's contained.
|
||||
return true
|
||||
|
||||
case ModuleInstance:
|
||||
if len(to) < len(m) {
|
||||
return false
|
||||
}
|
||||
for i, ourStep := range m {
|
||||
otherStep := to[i]
|
||||
|
||||
if ourStep.Name != otherStep.Name {
|
||||
return false
|
||||
}
|
||||
|
||||
// if this is our last step, because all targets are parsed as
|
||||
// instances, this may be a ModuleInstance intended to be used as a
|
||||
// Module.
|
||||
if i == len(m)-1 {
|
||||
if ourStep.InstanceKey == NoKey {
|
||||
// If the other step is a keyed instance, then we contain that
|
||||
// step, and if it isn't it's a match, which is true either way
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
if ourStep.InstanceKey != otherStep.InstanceKey {
|
||||
return false
|
||||
}
|
||||
|
||||
}
|
||||
return true
|
||||
|
||||
case ConfigResource:
|
||||
return m.TargetContains(to.Module)
|
||||
|
||||
case AbsResource:
|
||||
return m.TargetContains(to.Module)
|
||||
|
||||
|
@ -410,6 +451,26 @@ func (m ModuleInstance) TargetContains(other Targetable) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// Module returns the address of the module that this instance is an instance
|
||||
// of.
|
||||
func (m ModuleInstance) Module() Module {
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
ret := make(Module, len(m))
|
||||
for i, step := range m {
|
||||
ret[i] = step.Name
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (m ModuleInstance) targetableSigil() {
|
||||
// ModuleInstance is targetable
|
||||
}
|
||||
|
||||
func (s ModuleInstanceStep) String() string {
|
||||
if s.InstanceKey != NoKey {
|
||||
return s.Name + s.InstanceKey.String()
|
||||
}
|
||||
return s.Name
|
||||
}
|
||||
|
|
|
@ -62,13 +62,13 @@ func (v AbsOutputValue) String() string {
|
|||
//
|
||||
// The root module does not have a call, and so this method cannot be used
|
||||
// with outputs in the root module, and will panic in that case.
|
||||
func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, ModuleCallOutput) {
|
||||
func (v AbsOutputValue) ModuleCallOutput() (ModuleInstance, AbsModuleCallOutput) {
|
||||
if v.Module.IsRoot() {
|
||||
panic("ReferenceFromCall used with root module output")
|
||||
}
|
||||
|
||||
caller, call := v.Module.CallInstance()
|
||||
return caller, ModuleCallOutput{
|
||||
return caller, AbsModuleCallOutput{
|
||||
Call: call,
|
||||
Name: v.OutputValue.Name,
|
||||
}
|
||||
|
|
|
@ -3,8 +3,8 @@ package addrs
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
|
@ -85,6 +85,14 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
|
|||
Remaining: remain,
|
||||
}, diags
|
||||
|
||||
case "each":
|
||||
name, rng, remain, diags := parseSingleAttrRef(traversal)
|
||||
return &Reference{
|
||||
Subject: ForEachAttr{Name: name},
|
||||
SourceRange: tfdiags.SourceRangeFromHCL(rng),
|
||||
Remaining: remain,
|
||||
}, diags
|
||||
|
||||
case "data":
|
||||
if len(traversal) < 3 {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
|
@ -112,10 +120,9 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
|
|||
return nil, diags
|
||||
}
|
||||
|
||||
// A traversal starting with "module" can either be a reference to
|
||||
// an entire module instance or to a single output from a module
|
||||
// instance, depending on what we find after this introducer.
|
||||
|
||||
// A traversal starting with "module" can either be a reference to an
|
||||
// entire module, or to a single output from a module instance,
|
||||
// depending on what we find after this introducer.
|
||||
callInstance := ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: callName,
|
||||
|
@ -124,12 +131,12 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
|
|||
}
|
||||
|
||||
if len(remain) == 0 {
|
||||
// Reference to an entire module instance. Might alternatively
|
||||
// be a reference to a collection of instances of a particular
|
||||
// module, but the caller will need to deal with that ambiguity
|
||||
// since we don't have enough context here.
|
||||
// Reference to an entire module. Might alternatively be a
|
||||
// reference to a single instance of a particular module, but the
|
||||
// caller will need to deal with that ambiguity since we don't have
|
||||
// enough context here.
|
||||
return &Reference{
|
||||
Subject: callInstance,
|
||||
Subject: callInstance.Call,
|
||||
SourceRange: tfdiags.SourceRangeFromHCL(callRange),
|
||||
Remaining: remain,
|
||||
}, diags
|
||||
|
@ -163,7 +170,7 @@ func parseRef(traversal hcl.Traversal) (*Reference, tfdiags.Diagnostics) {
|
|||
if attrTrav, ok := remain[0].(hcl.TraverseAttr); ok {
|
||||
remain = remain[1:]
|
||||
return &Reference{
|
||||
Subject: ModuleCallOutput{
|
||||
Subject: AbsModuleCallOutput{
|
||||
Name: attrTrav.Name,
|
||||
Call: callInstance,
|
||||
},
|
||||
|
@ -282,7 +289,7 @@ func parseResourceRef(mode ResourceMode, startRange hcl.Range, traversal hcl.Tra
|
|||
// of the resource, but we don't have enough context here to decide
|
||||
// so we'll let the caller resolve that ambiguity.
|
||||
return &Reference{
|
||||
Subject: resourceInstAddr,
|
||||
Subject: resourceAddr,
|
||||
SourceRange: tfdiags.SourceRangeFromHCL(rng),
|
||||
}, diags
|
||||
}
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
@ -64,16 +64,60 @@ func TestParseRef(t *testing.T) {
|
|||
`The "count" object does not support this operation.`,
|
||||
},
|
||||
|
||||
// each
|
||||
{
|
||||
`each.key`,
|
||||
&Reference{
|
||||
Subject: ForEachAttr{
|
||||
Name: "key",
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
End: tfdiags.SourcePos{Line: 1, Column: 9, Byte: 8},
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`each.value.blah`,
|
||||
&Reference{
|
||||
Subject: ForEachAttr{
|
||||
Name: "value",
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
End: tfdiags.SourcePos{Line: 1, Column: 11, Byte: 10},
|
||||
},
|
||||
Remaining: hcl.Traversal{
|
||||
hcl.TraverseAttr{
|
||||
Name: "blah",
|
||||
SrcRange: hcl.Range{
|
||||
Start: hcl.Pos{Line: 1, Column: 11, Byte: 10},
|
||||
End: hcl.Pos{Line: 1, Column: 16, Byte: 15},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`each`,
|
||||
nil,
|
||||
`The "each" object cannot be accessed directly. Instead, access one of its attributes.`,
|
||||
},
|
||||
{
|
||||
`each["hello"]`,
|
||||
nil,
|
||||
`The "each" object does not support this operation.`,
|
||||
},
|
||||
// data
|
||||
{
|
||||
`data.external.foo`,
|
||||
&Reference{
|
||||
Subject: ResourceInstance{
|
||||
Resource: Resource{
|
||||
Mode: DataResourceMode,
|
||||
Type: "external",
|
||||
Name: "foo",
|
||||
},
|
||||
Subject: Resource{
|
||||
Mode: DataResourceMode,
|
||||
Type: "external",
|
||||
Name: "foo",
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
|
@ -237,10 +281,8 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`module.foo`,
|
||||
&Reference{
|
||||
Subject: ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: "foo",
|
||||
},
|
||||
Subject: ModuleCall{
|
||||
Name: "foo",
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
|
@ -252,7 +294,7 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`module.foo.bar`,
|
||||
&Reference{
|
||||
Subject: ModuleCallOutput{
|
||||
Subject: AbsModuleCallOutput{
|
||||
Call: ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: "foo",
|
||||
|
@ -270,7 +312,7 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`module.foo.bar.baz`,
|
||||
&Reference{
|
||||
Subject: ModuleCallOutput{
|
||||
Subject: AbsModuleCallOutput{
|
||||
Call: ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: "foo",
|
||||
|
@ -313,7 +355,7 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`module.foo["baz"].bar`,
|
||||
&Reference{
|
||||
Subject: ModuleCallOutput{
|
||||
Subject: AbsModuleCallOutput{
|
||||
Call: ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: "foo",
|
||||
|
@ -332,7 +374,7 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`module.foo["baz"].bar.boop`,
|
||||
&Reference{
|
||||
Subject: ModuleCallOutput{
|
||||
Subject: AbsModuleCallOutput{
|
||||
Call: ModuleCallInstance{
|
||||
Call: ModuleCall{
|
||||
Name: "foo",
|
||||
|
@ -546,12 +588,10 @@ func TestParseRef(t *testing.T) {
|
|||
{
|
||||
`boop_instance.foo`,
|
||||
&Reference{
|
||||
Subject: ResourceInstance{
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "boop_instance",
|
||||
Name: "foo",
|
||||
},
|
||||
Subject: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "boop_instance",
|
||||
Name: "foo",
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
|
|
|
@ -3,9 +3,9 @@ package addrs
|
|||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
|
|
|
@ -4,8 +4,8 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
|
@ -249,6 +249,51 @@ func TestParseTarget(t *testing.T) {
|
|||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.foo.module.bar[0].data.aws_instance.baz`,
|
||||
&Target{
|
||||
Subject: AbsResource{
|
||||
Resource: Resource{
|
||||
Mode: DataResourceMode,
|
||||
Type: "aws_instance",
|
||||
Name: "baz",
|
||||
},
|
||||
Module: ModuleInstance{
|
||||
{Name: "foo", InstanceKey: NoKey},
|
||||
{Name: "bar", InstanceKey: IntKey(0)},
|
||||
},
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
End: tfdiags.SourcePos{Line: 1, Column: 47, Byte: 46},
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.foo.module.bar["a"].data.aws_instance.baz["hello"]`,
|
||||
&Target{
|
||||
Subject: AbsResourceInstance{
|
||||
Resource: ResourceInstance{
|
||||
Resource: Resource{
|
||||
Mode: DataResourceMode,
|
||||
Type: "aws_instance",
|
||||
Name: "baz",
|
||||
},
|
||||
Key: StringKey("hello"),
|
||||
},
|
||||
Module: ModuleInstance{
|
||||
{Name: "foo", InstanceKey: NoKey},
|
||||
{Name: "bar", InstanceKey: StringKey("a")},
|
||||
},
|
||||
},
|
||||
SourceRange: tfdiags.SourceRange{
|
||||
Start: tfdiags.SourcePos{Line: 1, Column: 1, Byte: 0},
|
||||
End: tfdiags.SourcePos{Line: 1, Column: 58, Byte: 57},
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.foo.module.bar.data.aws_instance.baz["hello"]`,
|
||||
&Target{
|
||||
|
|
|
@ -0,0 +1,464 @@
|
|||
package addrs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/idna"
|
||||
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
svchost "github.com/hashicorp/terraform-svchost"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
// Provider encapsulates a single provider type. In the future this will be
|
||||
// extended to include additional fields including Namespace and SourceHost
|
||||
type Provider struct {
|
||||
Type string
|
||||
Namespace string
|
||||
Hostname svchost.Hostname
|
||||
}
|
||||
|
||||
// DefaultRegistryHost is the hostname used for provider addresses that do
|
||||
// not have an explicit hostname.
|
||||
const DefaultRegistryHost = svchost.Hostname("registry.terraform.io")
|
||||
|
||||
// BuiltInProviderHost is the pseudo-hostname used for the "built-in" provider
|
||||
// namespace. Built-in provider addresses must also have their namespace set
|
||||
// to BuiltInProviderNamespace in order to be considered as built-in.
|
||||
const BuiltInProviderHost = svchost.Hostname("terraform.io")
|
||||
|
||||
// BuiltInProviderNamespace is the provider namespace used for "built-in"
|
||||
// providers. Built-in provider addresses must also have their hostname
|
||||
// set to BuiltInProviderHost in order to be considered as built-in.
|
||||
//
|
||||
// The this namespace is literally named "builtin", in the hope that users
|
||||
// who see FQNs containing this will be able to infer the way in which they are
|
||||
// special, even if they haven't encountered the concept formally yet.
|
||||
const BuiltInProviderNamespace = "builtin"
|
||||
|
||||
// LegacyProviderNamespace is the special string used in the Namespace field
|
||||
// of type Provider to mark a legacy provider address. This special namespace
|
||||
// value would normally be invalid, and can be used only when the hostname is
|
||||
// DefaultRegistryHost because that host owns the mapping from legacy name to
|
||||
// FQN.
|
||||
const LegacyProviderNamespace = "-"
|
||||
|
||||
// String returns an FQN string, indended for use in machine-readable output.
|
||||
func (pt Provider) String() string {
|
||||
if pt.IsZero() {
|
||||
panic("called String on zero-value addrs.Provider")
|
||||
}
|
||||
return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
|
||||
}
|
||||
|
||||
// ForDisplay returns a user-friendly FQN string, simplified for readability. If
|
||||
// the provider is using the default hostname, the hostname is omitted.
|
||||
func (pt Provider) ForDisplay() string {
|
||||
if pt.IsZero() {
|
||||
panic("called ForDisplay on zero-value addrs.Provider")
|
||||
}
|
||||
|
||||
if pt.Hostname == DefaultRegistryHost {
|
||||
return pt.Namespace + "/" + pt.Type
|
||||
}
|
||||
return pt.Hostname.ForDisplay() + "/" + pt.Namespace + "/" + pt.Type
|
||||
}
|
||||
|
||||
// NewProvider constructs a provider address from its parts, and normalizes
|
||||
// the namespace and type parts to lowercase using unicode case folding rules
|
||||
// so that resulting addrs.Provider values can be compared using standard
|
||||
// Go equality rules (==).
|
||||
//
|
||||
// The hostname is given as a svchost.Hostname, which is required by the
|
||||
// contract of that type to have already been normalized for equality testing.
|
||||
//
|
||||
// This function will panic if the given namespace or type name are not valid.
|
||||
// When accepting namespace or type values from outside the program, use
|
||||
// ParseProviderPart first to check that the given value is valid.
|
||||
func NewProvider(hostname svchost.Hostname, namespace, typeName string) Provider {
|
||||
if namespace == LegacyProviderNamespace {
|
||||
// Legacy provider addresses must always be created via
|
||||
// NewLegacyProvider so that we can use static analysis to find
|
||||
// codepaths still working with those.
|
||||
panic("attempt to create legacy provider address using NewProvider; use NewLegacyProvider instead")
|
||||
}
|
||||
|
||||
return Provider{
|
||||
Type: MustParseProviderPart(typeName),
|
||||
Namespace: MustParseProviderPart(namespace),
|
||||
Hostname: hostname,
|
||||
}
|
||||
}
|
||||
|
||||
// ImpliedProviderForUnqualifiedType represents the rules for inferring what
|
||||
// provider FQN a user intended when only a naked type name is available.
|
||||
//
|
||||
// For all except the type name "terraform" this returns a so-called "default"
|
||||
// provider, which is under the registry.terraform.io/hashicorp/ namespace.
|
||||
//
|
||||
// As a special case, the string "terraform" maps to
|
||||
// "terraform.io/builtin/terraform" because that is the more likely user
|
||||
// intent than the now-unmaintained "registry.terraform.io/hashicorp/terraform"
|
||||
// which remains only for compatibility with older Terraform versions.
|
||||
func ImpliedProviderForUnqualifiedType(typeName string) Provider {
|
||||
switch typeName {
|
||||
case "terraform":
|
||||
// Note for future maintainers: any additional strings we add here
|
||||
// as implied to be builtin must never also be use as provider names
|
||||
// in the registry.terraform.io/hashicorp/... namespace, because
|
||||
// otherwise older versions of Terraform could implicitly select
|
||||
// the registry name instead of the internal one.
|
||||
return NewBuiltInProvider(typeName)
|
||||
default:
|
||||
return NewDefaultProvider(typeName)
|
||||
}
|
||||
}
|
||||
|
||||
// NewDefaultProvider returns the default address of a HashiCorp-maintained,
|
||||
// Registry-hosted provider.
|
||||
func NewDefaultProvider(name string) Provider {
|
||||
return Provider{
|
||||
Type: MustParseProviderPart(name),
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBuiltInProvider returns the address of a "built-in" provider. See
|
||||
// the docs for Provider.IsBuiltIn for more information.
|
||||
func NewBuiltInProvider(name string) Provider {
|
||||
return Provider{
|
||||
Type: MustParseProviderPart(name),
|
||||
Namespace: BuiltInProviderNamespace,
|
||||
Hostname: BuiltInProviderHost,
|
||||
}
|
||||
}
|
||||
|
||||
// NewLegacyProvider returns a mock address for a provider.
|
||||
// This will be removed when ProviderType is fully integrated.
|
||||
func NewLegacyProvider(name string) Provider {
|
||||
return Provider{
|
||||
// We intentionally don't normalize and validate the legacy names,
|
||||
// because existing code expects legacy provider names to pass through
|
||||
// verbatim, even if not compliant with our new naming rules.
|
||||
Type: name,
|
||||
Namespace: LegacyProviderNamespace,
|
||||
Hostname: DefaultRegistryHost,
|
||||
}
|
||||
}
|
||||
|
||||
// LegacyString returns the provider type, which is frequently used
|
||||
// interchangeably with provider name. This function can and should be removed
|
||||
// when provider type is fully integrated. As a safeguard for future
|
||||
// refactoring, this function panics if the Provider is not a legacy provider.
|
||||
func (pt Provider) LegacyString() string {
|
||||
if pt.IsZero() {
|
||||
panic("called LegacyString on zero-value addrs.Provider")
|
||||
}
|
||||
if pt.Namespace != LegacyProviderNamespace && pt.Namespace != BuiltInProviderNamespace {
|
||||
panic(pt.String() + " cannot be represented as a legacy string")
|
||||
}
|
||||
return pt.Type
|
||||
}
|
||||
|
||||
// IsZero returns true if the receiver is the zero value of addrs.Provider.
|
||||
//
|
||||
// The zero value is not a valid addrs.Provider and calling other methods on
|
||||
// such a value is likely to either panic or otherwise misbehave.
|
||||
func (pt Provider) IsZero() bool {
|
||||
return pt == Provider{}
|
||||
}
|
||||
|
||||
// IsBuiltIn returns true if the receiver is the address of a "built-in"
|
||||
// provider. That is, a provider under terraform.io/builtin/ which is
|
||||
// included as part of the Terraform binary itself rather than one to be
|
||||
// installed from elsewhere.
|
||||
//
|
||||
// These are ignored by the provider installer because they are assumed to
|
||||
// already be available without any further installation.
|
||||
func (pt Provider) IsBuiltIn() bool {
|
||||
return pt.Hostname == BuiltInProviderHost && pt.Namespace == BuiltInProviderNamespace
|
||||
}
|
||||
|
||||
// LessThan returns true if the receiver should sort before the other given
|
||||
// address in an ordered list of provider addresses.
|
||||
//
|
||||
// This ordering is an arbitrary one just to allow deterministic results from
|
||||
// functions that would otherwise have no natural ordering. It's subject
|
||||
// to change in future.
|
||||
func (pt Provider) LessThan(other Provider) bool {
|
||||
switch {
|
||||
case pt.Hostname != other.Hostname:
|
||||
return pt.Hostname < other.Hostname
|
||||
case pt.Namespace != other.Namespace:
|
||||
return pt.Namespace < other.Namespace
|
||||
default:
|
||||
return pt.Type < other.Type
|
||||
}
|
||||
}
|
||||
|
||||
// IsLegacy returns true if the provider is a legacy-style provider
|
||||
func (pt Provider) IsLegacy() bool {
|
||||
if pt.IsZero() {
|
||||
panic("called IsLegacy() on zero-value addrs.Provider")
|
||||
}
|
||||
|
||||
return pt.Hostname == DefaultRegistryHost && pt.Namespace == LegacyProviderNamespace
|
||||
|
||||
}
|
||||
|
||||
// IsDefault returns true if the provider is a default hashicorp provider
|
||||
func (pt Provider) IsDefault() bool {
|
||||
if pt.IsZero() {
|
||||
panic("called IsDefault() on zero-value addrs.Provider")
|
||||
}
|
||||
|
||||
return pt.Hostname == DefaultRegistryHost && pt.Namespace == "hashicorp"
|
||||
}
|
||||
|
||||
// Equals returns true if the receiver and other provider have the same attributes.
|
||||
func (pt Provider) Equals(other Provider) bool {
|
||||
return pt == other
|
||||
}
|
||||
|
||||
// ParseProviderSourceString parses the source attribute and returns a provider.
|
||||
// This is intended primarily to parse the FQN-like strings returned by
|
||||
// terraform-config-inspect.
|
||||
//
|
||||
// The following are valid source string formats:
|
||||
// name
|
||||
// namespace/name
|
||||
// hostname/namespace/name
|
||||
func ParseProviderSourceString(str string) (Provider, tfdiags.Diagnostics) {
|
||||
var ret Provider
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
// split the source string into individual components
|
||||
parts := strings.Split(str, "/")
|
||||
if len(parts) == 0 || len(parts) > 3 {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider source string",
|
||||
Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// check for an invalid empty string in any part
|
||||
for i := range parts {
|
||||
if parts[i] == "" {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider source string",
|
||||
Detail: `The "source" attribute must be in the format "[hostname/][namespace/]name"`,
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
}
|
||||
|
||||
// check the 'name' portion, which is always the last part
|
||||
givenName := parts[len(parts)-1]
|
||||
name, err := ParseProviderPart(givenName)
|
||||
if err != nil {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider type",
|
||||
Detail: fmt.Sprintf(`Invalid provider type %q in source %q: %s"`, givenName, str, err),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
ret.Type = name
|
||||
ret.Hostname = DefaultRegistryHost
|
||||
|
||||
if len(parts) == 1 {
|
||||
return NewDefaultProvider(parts[0]), diags
|
||||
}
|
||||
|
||||
if len(parts) >= 2 {
|
||||
// the namespace is always the second-to-last part
|
||||
givenNamespace := parts[len(parts)-2]
|
||||
if givenNamespace == LegacyProviderNamespace {
|
||||
// For now we're tolerating legacy provider addresses until we've
|
||||
// finished updating the rest of the codebase to no longer use them,
|
||||
// or else we'd get errors round-tripping through legacy subsystems.
|
||||
ret.Namespace = LegacyProviderNamespace
|
||||
} else {
|
||||
namespace, err := ParseProviderPart(givenNamespace)
|
||||
if err != nil {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider namespace",
|
||||
Detail: fmt.Sprintf(`Invalid provider namespace %q in source %q: %s"`, namespace, str, err),
|
||||
})
|
||||
return Provider{}, diags
|
||||
}
|
||||
ret.Namespace = namespace
|
||||
}
|
||||
}
|
||||
|
||||
// Final Case: 3 parts
|
||||
if len(parts) == 3 {
|
||||
// the namespace is always the first part in a three-part source string
|
||||
hn, err := svchost.ForComparison(parts[0])
|
||||
if err != nil {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider source hostname",
|
||||
Detail: fmt.Sprintf(`Invalid provider source hostname namespace %q in source %q: %s"`, hn, str, err),
|
||||
})
|
||||
return Provider{}, diags
|
||||
}
|
||||
ret.Hostname = hn
|
||||
}
|
||||
|
||||
if ret.Namespace == LegacyProviderNamespace && ret.Hostname != DefaultRegistryHost {
|
||||
// Legacy provider addresses must always be on the default registry
|
||||
// host, because the default registry host decides what actual FQN
|
||||
// each one maps to.
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider namespace",
|
||||
Detail: "The legacy provider namespace \"-\" can be used only with hostname " + DefaultRegistryHost.ForDisplay() + ".",
|
||||
})
|
||||
return Provider{}, diags
|
||||
}
|
||||
|
||||
// Due to how plugin executables are named and provider git repositories
|
||||
// are conventionally named, it's a reasonable and
|
||||
// apparently-somewhat-common user error to incorrectly use the
|
||||
// "terraform-provider-" prefix in a provider source address. There is
|
||||
// no good reason for a provider to have the prefix "terraform-" anyway,
|
||||
// so we've made that invalid from the start both so we can give feedback
|
||||
// to provider developers about the terraform- prefix being redundant
|
||||
// and give specialized feedback to folks who incorrectly use the full
|
||||
// terraform-provider- prefix to help them self-correct.
|
||||
const redundantPrefix = "terraform-"
|
||||
const userErrorPrefix = "terraform-provider-"
|
||||
if strings.HasPrefix(ret.Type, redundantPrefix) {
|
||||
if strings.HasPrefix(ret.Type, userErrorPrefix) {
|
||||
// Likely user error. We only return this specialized error if
|
||||
// whatever is after the prefix would otherwise be a
|
||||
// syntactically-valid provider type, so we don't end up advising
|
||||
// the user to try something that would be invalid for another
|
||||
// reason anyway.
|
||||
// (This is mainly just for robustness, because the validation
|
||||
// we already did above should've rejected most/all ways for
|
||||
// the suggestedType to end up invalid here.)
|
||||
suggestedType := ret.Type[len(userErrorPrefix):]
|
||||
if _, err := ParseProviderPart(suggestedType); err == nil {
|
||||
suggestedAddr := ret
|
||||
suggestedAddr.Type = suggestedType
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Invalid provider type",
|
||||
fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't valid. Although that prefix is often used in the names of version control repositories for Terraform providers, provider source strings should not include it.\n\nDid you mean %q?", ret.ForDisplay(), userErrorPrefix, suggestedAddr.ForDisplay()),
|
||||
))
|
||||
return Provider{}, diags
|
||||
}
|
||||
}
|
||||
// Otherwise, probably instead an incorrectly-named provider, perhaps
|
||||
// arising from a similar instinct to what causes there to be
|
||||
// thousands of Python packages on PyPI with "python-"-prefixed
|
||||
// names.
|
||||
diags = diags.Append(tfdiags.Sourceless(
|
||||
tfdiags.Error,
|
||||
"Invalid provider type",
|
||||
fmt.Sprintf("Provider source %q has a type with the prefix %q, which isn't allowed because it would be redundant to name a Terraform provider with that prefix. If you are the author of this provider, rename it to not include the prefix.", ret, redundantPrefix),
|
||||
))
|
||||
return Provider{}, diags
|
||||
}
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// MustParseProviderSourceString is a wrapper around ParseProviderSourceString that panics if
|
||||
// it returns an error.
|
||||
func MustParseProviderSourceString(str string) Provider {
|
||||
result, diags := ParseProviderSourceString(str)
|
||||
if diags.HasErrors() {
|
||||
panic(diags.Err().Error())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// ParseProviderPart processes an addrs.Provider namespace or type string
|
||||
// provided by an end-user, producing a normalized version if possible or
|
||||
// an error if the string contains invalid characters.
|
||||
//
|
||||
// A provider part is processed in the same way as an individual label in a DNS
|
||||
// domain name: it is transformed to lowercase per the usual DNS case mapping
|
||||
// and normalization rules and may contain only letters, digits, and dashes.
|
||||
// Additionally, dashes may not appear at the start or end of the string.
|
||||
//
|
||||
// These restrictions are intended to allow these names to appear in fussy
|
||||
// contexts such as directory/file names on case-insensitive filesystems,
|
||||
// repository names on GitHub, etc. We're using the DNS rules in particular,
|
||||
// rather than some similar rules defined locally, because the hostname part
|
||||
// of an addrs.Provider is already a hostname and it's ideal to use exactly
|
||||
// the same case folding and normalization rules for all of the parts.
|
||||
//
|
||||
// In practice a provider type string conventionally does not contain dashes
|
||||
// either. Such names are permitted, but providers with such type names will be
|
||||
// hard to use because their resource type names will not be able to contain
|
||||
// the provider type name and thus each resource will need an explicit provider
|
||||
// address specified. (A real-world example of such a provider is the
|
||||
// "google-beta" variant of the GCP provider, which has resource types that
|
||||
// start with the "google_" prefix instead.)
|
||||
//
|
||||
// It's valid to pass the result of this function as the argument to a
|
||||
// subsequent call, in which case the result will be identical.
|
||||
func ParseProviderPart(given string) (string, error) {
|
||||
if len(given) == 0 {
|
||||
return "", fmt.Errorf("must have at least one character")
|
||||
}
|
||||
|
||||
// We're going to process the given name using the same "IDNA" library we
|
||||
// use for the hostname portion, since it already implements the case
|
||||
// folding rules we want.
|
||||
//
|
||||
// The idna library doesn't expose individual label parsing directly, but
|
||||
// once we've verified it doesn't contain any dots we can just treat it
|
||||
// like a top-level domain for this library's purposes.
|
||||
if strings.ContainsRune(given, '.') {
|
||||
return "", fmt.Errorf("dots are not allowed")
|
||||
}
|
||||
|
||||
// We don't allow names containing multiple consecutive dashes, just as
|
||||
// a matter of preference: they look weird, confusing, or incorrect.
|
||||
// This also, as a side-effect, prevents the use of the "punycode"
|
||||
// indicator prefix "xn--" that would cause the IDNA library to interpret
|
||||
// the given name as punycode, because that would be weird and unexpected.
|
||||
if strings.Contains(given, "--") {
|
||||
return "", fmt.Errorf("cannot use multiple consecutive dashes")
|
||||
}
|
||||
|
||||
result, err := idna.Lookup.ToUnicode(given)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("must contain only letters, digits, and dashes, and may not use leading or trailing dashes")
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// MustParseProviderPart is a wrapper around ParseProviderPart that panics if
|
||||
// it returns an error.
|
||||
func MustParseProviderPart(given string) string {
|
||||
result, err := ParseProviderPart(given)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// IsProviderPartNormalized compares a given string to the result of ParseProviderPart(string)
|
||||
func IsProviderPartNormalized(str string) (bool, error) {
|
||||
normalized, err := ParseProviderPart(str)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if str == normalized {
|
||||
return true, nil
|
||||
}
|
||||
return false, nil
|
||||
}
|
|
@ -2,155 +2,106 @@ package addrs
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
)
|
||||
|
||||
// ProviderConfig is the address of a provider configuration.
|
||||
type ProviderConfig struct {
|
||||
Type string
|
||||
// ProviderConfig is an interface type whose dynamic type can be either
|
||||
// LocalProviderConfig or AbsProviderConfig, in order to represent situations
|
||||
// where a value might either be module-local or absolute but the decision
|
||||
// cannot be made until runtime.
|
||||
//
|
||||
// Where possible, use either LocalProviderConfig or AbsProviderConfig directly
|
||||
// instead, to make intent more clear. ProviderConfig can be used only in
|
||||
// situations where the recipient of the value has some out-of-band way to
|
||||
// determine a "current module" to use if the value turns out to be
|
||||
// a LocalProviderConfig.
|
||||
//
|
||||
// Recipients of non-nil ProviderConfig values that actually need
|
||||
// AbsProviderConfig values should call ResolveAbsProviderAddr on the
|
||||
// *configs.Config value representing the root module configuration, which
|
||||
// handles the translation from local to fully-qualified using mapping tables
|
||||
// defined in the configuration.
|
||||
//
|
||||
// Recipients of a ProviderConfig value can assume it can contain only a
|
||||
// LocalProviderConfig value, an AbsProviderConfigValue, or nil to represent
|
||||
// the absence of a provider config in situations where that is meaningful.
|
||||
type ProviderConfig interface {
|
||||
providerConfig()
|
||||
}
|
||||
|
||||
// LocalProviderConfig is the address of a provider configuration from the
|
||||
// perspective of references in a particular module.
|
||||
//
|
||||
// Finding the corresponding AbsProviderConfig will require looking up the
|
||||
// LocalName in the providers table in the module's configuration; there is
|
||||
// no syntax-only translation between these types.
|
||||
type LocalProviderConfig struct {
|
||||
LocalName string
|
||||
|
||||
// If not empty, Alias identifies which non-default (aliased) provider
|
||||
// configuration this address refers to.
|
||||
Alias string
|
||||
}
|
||||
|
||||
// NewDefaultProviderConfig returns the address of the default (un-aliased)
|
||||
// configuration for the provider with the given type name.
|
||||
func NewDefaultProviderConfig(typeName string) ProviderConfig {
|
||||
return ProviderConfig{
|
||||
Type: typeName,
|
||||
var _ ProviderConfig = LocalProviderConfig{}
|
||||
|
||||
// NewDefaultLocalProviderConfig returns the address of the default (un-aliased)
|
||||
// configuration for the provider with the given local type name.
|
||||
func NewDefaultLocalProviderConfig(LocalNameName string) LocalProviderConfig {
|
||||
return LocalProviderConfig{
|
||||
LocalName: LocalNameName,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseProviderConfigCompact parses the given absolute traversal as a relative
|
||||
// provider address in compact form. The following are examples of traversals
|
||||
// that can be successfully parsed as compact relative provider configuration
|
||||
// addresses:
|
||||
//
|
||||
// aws
|
||||
// aws.foo
|
||||
//
|
||||
// This function will panic if given a relative traversal.
|
||||
//
|
||||
// If the returned diagnostics contains errors then the result value is invalid
|
||||
// and must not be used.
|
||||
func ParseProviderConfigCompact(traversal hcl.Traversal) (ProviderConfig, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
ret := ProviderConfig{
|
||||
Type: traversal.RootName(),
|
||||
}
|
||||
// providerConfig Implements addrs.ProviderConfig.
|
||||
func (pc LocalProviderConfig) providerConfig() {}
|
||||
|
||||
if len(traversal) < 2 {
|
||||
// Just a type name, then.
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
aliasStep := traversal[1]
|
||||
switch ts := aliasStep.(type) {
|
||||
case hcl.TraverseAttr:
|
||||
ret.Alias = ts.Name
|
||||
return ret, diags
|
||||
default:
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "The provider type name must either stand alone or be followed by an alias name separated with a dot.",
|
||||
Subject: aliasStep.SourceRange().Ptr(),
|
||||
})
|
||||
}
|
||||
|
||||
if len(traversal) > 2 {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Extraneous extra operators after provider configuration address.",
|
||||
Subject: traversal[2:].SourceRange().Ptr(),
|
||||
})
|
||||
}
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// ParseProviderConfigCompactStr is a helper wrapper around ParseProviderConfigCompact
|
||||
// that takes a string and parses it with the HCL native syntax traversal parser
|
||||
// before interpreting it.
|
||||
//
|
||||
// This should be used only in specialized situations since it will cause the
|
||||
// created references to not have any meaningful source location information.
|
||||
// If a reference string is coming from a source that should be identified in
|
||||
// error messages then the caller should instead parse it directly using a
|
||||
// suitable function from the HCL API and pass the traversal itself to
|
||||
// ParseProviderConfigCompact.
|
||||
//
|
||||
// Error diagnostics are returned if either the parsing fails or the analysis
|
||||
// of the traversal fails. There is no way for the caller to distinguish the
|
||||
// two kinds of diagnostics programmatically. If error diagnostics are returned
|
||||
// then the returned address is invalid.
|
||||
func ParseProviderConfigCompactStr(str string) (ProviderConfig, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
|
||||
diags = diags.Append(parseDiags)
|
||||
if parseDiags.HasErrors() {
|
||||
return ProviderConfig{}, diags
|
||||
}
|
||||
|
||||
addr, addrDiags := ParseProviderConfigCompact(traversal)
|
||||
diags = diags.Append(addrDiags)
|
||||
return addr, diags
|
||||
}
|
||||
|
||||
// Absolute returns an AbsProviderConfig from the receiver and the given module
|
||||
// instance address.
|
||||
func (pc ProviderConfig) Absolute(module ModuleInstance) AbsProviderConfig {
|
||||
return AbsProviderConfig{
|
||||
Module: module,
|
||||
ProviderConfig: pc,
|
||||
}
|
||||
}
|
||||
|
||||
func (pc ProviderConfig) String() string {
|
||||
if pc.Type == "" {
|
||||
func (pc LocalProviderConfig) String() string {
|
||||
if pc.LocalName == "" {
|
||||
// Should never happen; always indicates a bug
|
||||
return "provider.<invalid>"
|
||||
}
|
||||
|
||||
if pc.Alias != "" {
|
||||
return fmt.Sprintf("provider.%s.%s", pc.Type, pc.Alias)
|
||||
return fmt.Sprintf("provider.%s.%s", pc.LocalName, pc.Alias)
|
||||
}
|
||||
|
||||
return "provider." + pc.Type
|
||||
return "provider." + pc.LocalName
|
||||
}
|
||||
|
||||
// StringCompact is an alternative to String that returns the form that can
|
||||
// be parsed by ParseProviderConfigCompact, without the "provider." prefix.
|
||||
func (pc ProviderConfig) StringCompact() string {
|
||||
func (pc LocalProviderConfig) StringCompact() string {
|
||||
if pc.Alias != "" {
|
||||
return fmt.Sprintf("%s.%s", pc.Type, pc.Alias)
|
||||
return fmt.Sprintf("%s.%s", pc.LocalName, pc.Alias)
|
||||
}
|
||||
return pc.Type
|
||||
return pc.LocalName
|
||||
}
|
||||
|
||||
// AbsProviderConfig is the absolute address of a provider configuration
|
||||
// within a particular module instance.
|
||||
type AbsProviderConfig struct {
|
||||
Module ModuleInstance
|
||||
ProviderConfig ProviderConfig
|
||||
Module Module
|
||||
Provider Provider
|
||||
Alias string
|
||||
}
|
||||
|
||||
var _ ProviderConfig = AbsProviderConfig{}
|
||||
|
||||
// ParseAbsProviderConfig parses the given traversal as an absolute provider
|
||||
// address. The following are examples of traversals that can be successfully
|
||||
// parsed as absolute provider configuration addresses:
|
||||
//
|
||||
// provider.aws
|
||||
// provider.aws.foo
|
||||
// module.bar.provider.aws
|
||||
// module.bar.module.baz.provider.aws.foo
|
||||
// module.foo[1].provider.aws.foo
|
||||
// provider["registry.terraform.io/hashicorp/aws"]
|
||||
// provider["registry.terraform.io/hashicorp/aws"].foo
|
||||
// module.bar.provider["registry.terraform.io/hashicorp/aws"]
|
||||
// module.bar.module.baz.provider["registry.terraform.io/hashicorp/aws"].foo
|
||||
//
|
||||
// This type of address is used, for example, to record the relationships
|
||||
// between resources and provider configurations in the state structure.
|
||||
|
@ -158,9 +109,23 @@ type AbsProviderConfig struct {
|
|||
// messages that refer to provider configurations.
|
||||
func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
|
||||
modInst, remain, diags := parseModuleInstancePrefix(traversal)
|
||||
ret := AbsProviderConfig{
|
||||
Module: modInst,
|
||||
var ret AbsProviderConfig
|
||||
|
||||
// Providers cannot resolve within module instances, so verify that there
|
||||
// are no instance keys in the module path before converting to a Module.
|
||||
for _, step := range modInst {
|
||||
if step.InstanceKey != NoKey {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Provider address cannot contain module indexes",
|
||||
Subject: remain.SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
}
|
||||
ret.Module = modInst.Module()
|
||||
|
||||
if len(remain) < 2 || remain.RootName() != "provider" {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
|
@ -180,8 +145,22 @@ func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags
|
|||
return ret, diags
|
||||
}
|
||||
|
||||
if tt, ok := remain[1].(hcl.TraverseAttr); ok {
|
||||
ret.ProviderConfig.Type = tt.Name
|
||||
if tt, ok := remain[1].(hcl.TraverseIndex); ok {
|
||||
if !tt.Key.Type().Equals(cty.String) {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "The prefix \"provider.\" must be followed by a provider type name.",
|
||||
Subject: remain[1].SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
p, sourceDiags := ParseProviderSourceString(tt.Key.AsString())
|
||||
ret.Provider = p
|
||||
if sourceDiags.HasErrors() {
|
||||
diags = diags.Append(sourceDiags)
|
||||
return ret, diags
|
||||
}
|
||||
} else {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
|
@ -194,7 +173,7 @@ func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags
|
|||
|
||||
if len(remain) == 3 {
|
||||
if tt, ok := remain[2].(hcl.TraverseAttr); ok {
|
||||
ret.ProviderConfig.Alias = tt.Name
|
||||
ret.Alias = tt.Name
|
||||
} else {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
|
@ -226,6 +205,18 @@ func ParseAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags
|
|||
// the returned address is invalid.
|
||||
func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
|
||||
diags = diags.Append(parseDiags)
|
||||
if parseDiags.HasErrors() {
|
||||
return AbsProviderConfig{}, diags
|
||||
}
|
||||
addr, addrDiags := ParseAbsProviderConfig(traversal)
|
||||
diags = diags.Append(addrDiags)
|
||||
return addr, diags
|
||||
}
|
||||
|
||||
func ParseLegacyAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(str), "", hcl.Pos{Line: 1, Column: 1})
|
||||
diags = diags.Append(parseDiags)
|
||||
|
@ -233,34 +224,118 @@ func ParseAbsProviderConfigStr(str string) (AbsProviderConfig, tfdiags.Diagnosti
|
|||
return AbsProviderConfig{}, diags
|
||||
}
|
||||
|
||||
addr, addrDiags := ParseAbsProviderConfig(traversal)
|
||||
addr, addrDiags := ParseLegacyAbsProviderConfig(traversal)
|
||||
diags = diags.Append(addrDiags)
|
||||
return addr, diags
|
||||
}
|
||||
|
||||
// ProviderConfigDefault returns the address of the default provider config
|
||||
// of the given type inside the recieving module instance.
|
||||
func (m ModuleInstance) ProviderConfigDefault(name string) AbsProviderConfig {
|
||||
// ParseLegacyAbsProviderConfig parses the given traversal as an absolute
|
||||
// provider address. The following are examples of traversals that can be
|
||||
// successfully parsed as legacy absolute provider configuration addresses:
|
||||
//
|
||||
// provider.aws
|
||||
// provider.aws.foo
|
||||
// module.bar.provider.aws
|
||||
// module.bar.module.baz.provider.aws.foo
|
||||
//
|
||||
// This type of address is used in legacy state and may appear in state v4 if
|
||||
// the provider config addresses have not been normalized to include provider
|
||||
// FQN.
|
||||
func ParseLegacyAbsProviderConfig(traversal hcl.Traversal) (AbsProviderConfig, tfdiags.Diagnostics) {
|
||||
modInst, remain, diags := parseModuleInstancePrefix(traversal)
|
||||
var ret AbsProviderConfig
|
||||
|
||||
// Providers cannot resolve within module instances, so verify that there
|
||||
// are no instance keys in the module path before converting to a Module.
|
||||
for _, step := range modInst {
|
||||
if step.InstanceKey != NoKey {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Provider address cannot contain module indexes",
|
||||
Subject: remain.SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
}
|
||||
ret.Module = modInst.Module()
|
||||
|
||||
if len(remain) < 2 || remain.RootName() != "provider" {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Provider address must begin with \"provider.\", followed by a provider type name.",
|
||||
Subject: remain.SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
if len(remain) > 3 {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Extraneous operators after provider configuration alias.",
|
||||
Subject: hcl.Traversal(remain[3:]).SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// We always assume legacy-style providers in legacy state ...
|
||||
if tt, ok := remain[1].(hcl.TraverseAttr); ok {
|
||||
// ... unless it's the builtin "terraform" provider, a special case.
|
||||
if tt.Name == "terraform" {
|
||||
ret.Provider = NewBuiltInProvider(tt.Name)
|
||||
} else {
|
||||
ret.Provider = NewLegacyProvider(tt.Name)
|
||||
}
|
||||
} else {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "The prefix \"provider.\" must be followed by a provider type name.",
|
||||
Subject: remain[1].SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
if len(remain) == 3 {
|
||||
if tt, ok := remain[2].(hcl.TraverseAttr); ok {
|
||||
ret.Alias = tt.Name
|
||||
} else {
|
||||
diags = diags.Append(&hcl.Diagnostic{
|
||||
Severity: hcl.DiagError,
|
||||
Summary: "Invalid provider configuration address",
|
||||
Detail: "Provider type name must be followed by a configuration alias name.",
|
||||
Subject: remain[2].SourceRange().Ptr(),
|
||||
})
|
||||
return ret, diags
|
||||
}
|
||||
}
|
||||
|
||||
return ret, diags
|
||||
}
|
||||
|
||||
// ProviderConfigDefault returns the address of the default provider config of
|
||||
// the given type inside the recieving module instance.
|
||||
func (m ModuleInstance) ProviderConfigDefault(provider Provider) AbsProviderConfig {
|
||||
return AbsProviderConfig{
|
||||
Module: m,
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: name,
|
||||
},
|
||||
Module: m.Module(),
|
||||
Provider: provider,
|
||||
}
|
||||
}
|
||||
|
||||
// ProviderConfigAliased returns the address of an aliased provider config
|
||||
// of with given type and alias inside the recieving module instance.
|
||||
func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderConfig {
|
||||
// ProviderConfigAliased returns the address of an aliased provider config of
|
||||
// the given type and alias inside the recieving module instance.
|
||||
func (m ModuleInstance) ProviderConfigAliased(provider Provider, alias string) AbsProviderConfig {
|
||||
return AbsProviderConfig{
|
||||
Module: m,
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: name,
|
||||
Alias: alias,
|
||||
},
|
||||
Module: m.Module(),
|
||||
Provider: provider,
|
||||
Alias: alias,
|
||||
}
|
||||
}
|
||||
|
||||
// providerConfig Implements addrs.ProviderConfig.
|
||||
func (pc AbsProviderConfig) providerConfig() {}
|
||||
|
||||
// Inherited returns an address that the receiving configuration address might
|
||||
// inherit from in a parent module. The second bool return value indicates if
|
||||
// such inheritance is possible, and thus whether the returned address is valid.
|
||||
|
@ -269,9 +344,9 @@ func (m ModuleInstance) ProviderConfigAliased(name, alias string) AbsProviderCon
|
|||
// other than the root module. Even if a valid address is returned, inheritence
|
||||
// may not be performed for other reasons, such as if the calling module
|
||||
// provided explicit provider configurations within the call for this module.
|
||||
// The ProviderTransformer graph transform in the main terraform module has
|
||||
// the authoritative logic for provider inheritance, and this method is here
|
||||
// mainly just for its benefit.
|
||||
// The ProviderTransformer graph transform in the main terraform module has the
|
||||
// authoritative logic for provider inheritance, and this method is here mainly
|
||||
// just for its benefit.
|
||||
func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
|
||||
// Can't inherit if we're already in the root.
|
||||
if len(pc.Module) == 0 {
|
||||
|
@ -279,19 +354,52 @@ func (pc AbsProviderConfig) Inherited() (AbsProviderConfig, bool) {
|
|||
}
|
||||
|
||||
// Can't inherit if we have an alias.
|
||||
if pc.ProviderConfig.Alias != "" {
|
||||
if pc.Alias != "" {
|
||||
return AbsProviderConfig{}, false
|
||||
}
|
||||
|
||||
// Otherwise, we might inherit from a configuration with the same
|
||||
// provider name in the parent module instance.
|
||||
// provider type in the parent module instance.
|
||||
parentMod := pc.Module.Parent()
|
||||
return pc.ProviderConfig.Absolute(parentMod), true
|
||||
return AbsProviderConfig{
|
||||
Module: parentMod,
|
||||
Provider: pc.Provider,
|
||||
}, true
|
||||
|
||||
}
|
||||
|
||||
func (pc AbsProviderConfig) String() string {
|
||||
if len(pc.Module) == 0 {
|
||||
return pc.ProviderConfig.String()
|
||||
// LegacyString() returns a legacy-style AbsProviderConfig string and should only be used for legacy state shimming.
|
||||
func (pc AbsProviderConfig) LegacyString() string {
|
||||
if pc.Alias != "" {
|
||||
if len(pc.Module) == 0 {
|
||||
return fmt.Sprintf("%s.%s.%s", "provider", pc.Provider.LegacyString(), pc.Alias)
|
||||
} else {
|
||||
return fmt.Sprintf("%s.%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString(), pc.Alias)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", pc.Module.String(), pc.ProviderConfig.String())
|
||||
if len(pc.Module) == 0 {
|
||||
return fmt.Sprintf("%s.%s", "provider", pc.Provider.LegacyString())
|
||||
}
|
||||
return fmt.Sprintf("%s.%s.%s", pc.Module.String(), "provider", pc.Provider.LegacyString())
|
||||
}
|
||||
|
||||
// String() returns a string representation of an AbsProviderConfig in the following format:
|
||||
//
|
||||
// provider["example.com/namespace/name"]
|
||||
// provider["example.com/namespace/name"].alias
|
||||
// module.module-name.provider["example.com/namespace/name"]
|
||||
// module.module-name.provider["example.com/namespace/name"].alias
|
||||
func (pc AbsProviderConfig) String() string {
|
||||
var parts []string
|
||||
if len(pc.Module) > 0 {
|
||||
parts = append(parts, pc.Module.String())
|
||||
}
|
||||
|
||||
parts = append(parts, fmt.Sprintf("provider[%q]", pc.Provider))
|
||||
|
||||
if pc.Alias != "" {
|
||||
parts = append(parts, pc.Alias)
|
||||
}
|
||||
|
||||
return strings.Join(parts, ".")
|
||||
}
|
||||
|
|
|
@ -1,76 +1,15 @@
|
|||
package addrs
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
|
||||
"github.com/hashicorp/hcl2/hcl"
|
||||
"github.com/hashicorp/hcl2/hcl/hclsyntax"
|
||||
"github.com/hashicorp/hcl/v2"
|
||||
"github.com/hashicorp/hcl/v2/hclsyntax"
|
||||
)
|
||||
|
||||
func TestParseProviderConfigCompact(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input string
|
||||
Want ProviderConfig
|
||||
WantDiag string
|
||||
}{
|
||||
{
|
||||
`aws`,
|
||||
ProviderConfig{
|
||||
Type: "aws",
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`aws.foo`,
|
||||
ProviderConfig{
|
||||
Type: "aws",
|
||||
Alias: "foo",
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`aws["foo"]`,
|
||||
ProviderConfig{},
|
||||
`The provider type name must either stand alone or be followed by an alias name separated with a dot.`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
t.Run(test.Input, func(t *testing.T) {
|
||||
traversal, parseDiags := hclsyntax.ParseTraversalAbs([]byte(test.Input), "", hcl.Pos{})
|
||||
if len(parseDiags) != 0 {
|
||||
t.Errorf("unexpected diagnostics during parse")
|
||||
for _, diag := range parseDiags {
|
||||
t.Logf("- %s", diag)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
got, diags := ParseProviderConfigCompact(traversal)
|
||||
|
||||
if test.WantDiag != "" {
|
||||
if len(diags) != 1 {
|
||||
t.Fatalf("got %d diagnostics; want 1", len(diags))
|
||||
}
|
||||
gotDetail := diags[0].Description().Detail
|
||||
if gotDetail != test.WantDiag {
|
||||
t.Fatalf("wrong diagnostic detail\ngot: %s\nwant: %s", gotDetail, test.WantDiag)
|
||||
}
|
||||
return
|
||||
} else {
|
||||
if len(diags) != 0 {
|
||||
t.Fatalf("got %d diagnostics; want 0", len(diags))
|
||||
}
|
||||
}
|
||||
|
||||
for _, problem := range deep.Equal(got, test.Want) {
|
||||
t.Error(problem)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
func TestParseAbsProviderConfig(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input string
|
||||
|
@ -78,102 +17,69 @@ func TestParseAbsProviderConfig(t *testing.T) {
|
|||
WantDiag string
|
||||
}{
|
||||
{
|
||||
`provider.aws`,
|
||||
`provider["registry.terraform.io/hashicorp/aws"]`,
|
||||
AbsProviderConfig{
|
||||
Module: RootModuleInstance,
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
Module: RootModule,
|
||||
Provider: Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: "registry.terraform.io",
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`provider.aws.foo`,
|
||||
`provider["registry.terraform.io/hashicorp/aws"].foo`,
|
||||
AbsProviderConfig{
|
||||
Module: RootModuleInstance,
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
Alias: "foo",
|
||||
Module: RootModule,
|
||||
Provider: Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: "registry.terraform.io",
|
||||
},
|
||||
Alias: "foo",
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.baz.provider["registry.terraform.io/hashicorp/aws"]`,
|
||||
AbsProviderConfig{
|
||||
Module: Module{"baz"},
|
||||
Provider: Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: "registry.terraform.io",
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.baz.provider.aws`,
|
||||
`module.baz.provider["registry.terraform.io/hashicorp/aws"].foo`,
|
||||
AbsProviderConfig{
|
||||
Module: ModuleInstance{
|
||||
{
|
||||
Name: "baz",
|
||||
},
|
||||
},
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
Module: Module{"baz"},
|
||||
Provider: Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: "registry.terraform.io",
|
||||
},
|
||||
Alias: "foo",
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.baz.provider.aws.foo`,
|
||||
AbsProviderConfig{
|
||||
Module: ModuleInstance{
|
||||
{
|
||||
Name: "baz",
|
||||
},
|
||||
},
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
Alias: "foo",
|
||||
},
|
||||
},
|
||||
``,
|
||||
`module.baz["foo"].provider["registry.terraform.io/hashicorp/aws"]`,
|
||||
AbsProviderConfig{},
|
||||
`Provider address cannot contain module indexes`,
|
||||
},
|
||||
{
|
||||
`module.baz["foo"].provider.aws`,
|
||||
AbsProviderConfig{
|
||||
Module: ModuleInstance{
|
||||
{
|
||||
Name: "baz",
|
||||
InstanceKey: StringKey("foo"),
|
||||
},
|
||||
},
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
},
|
||||
},
|
||||
``,
|
||||
`module.baz[1].provider["registry.terraform.io/hashicorp/aws"]`,
|
||||
AbsProviderConfig{},
|
||||
`Provider address cannot contain module indexes`,
|
||||
},
|
||||
{
|
||||
`module.baz[1].provider.aws`,
|
||||
AbsProviderConfig{
|
||||
Module: ModuleInstance{
|
||||
{
|
||||
Name: "baz",
|
||||
InstanceKey: IntKey(1),
|
||||
},
|
||||
},
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
},
|
||||
},
|
||||
``,
|
||||
},
|
||||
{
|
||||
`module.baz[1].module.bar.provider.aws`,
|
||||
AbsProviderConfig{
|
||||
Module: ModuleInstance{
|
||||
{
|
||||
Name: "baz",
|
||||
InstanceKey: IntKey(1),
|
||||
},
|
||||
{
|
||||
Name: "bar",
|
||||
},
|
||||
},
|
||||
ProviderConfig: ProviderConfig{
|
||||
Type: "aws",
|
||||
},
|
||||
},
|
||||
``,
|
||||
`module.baz[1].module.bar.provider["registry.terraform.io/hashicorp/aws"]`,
|
||||
AbsProviderConfig{},
|
||||
`Provider address cannot contain module indexes`,
|
||||
},
|
||||
{
|
||||
`aws`,
|
||||
|
@ -196,12 +102,7 @@ func TestParseAbsProviderConfig(t *testing.T) {
|
|||
`Extraneous operators after provider configuration alias.`,
|
||||
},
|
||||
{
|
||||
`provider["aws"]`,
|
||||
AbsProviderConfig{},
|
||||
`The prefix "provider." must be followed by a provider type name.`,
|
||||
},
|
||||
{
|
||||
`provider.aws["foo"]`,
|
||||
`provider["aws"]["foo"]`,
|
||||
AbsProviderConfig{},
|
||||
`Provider type name must be followed by a configuration alias name.`,
|
||||
},
|
||||
|
@ -211,9 +112,9 @@ func TestParseAbsProviderConfig(t *testing.T) {
|
|||
`Provider address must begin with "provider.", followed by a provider type name.`,
|
||||
},
|
||||
{
|
||||
`module.foo["provider"]`,
|
||||
`provider[0]`,
|
||||
AbsProviderConfig{},
|
||||
`Provider address must begin with "provider.", followed by a provider type name.`,
|
||||
`The prefix "provider." must be followed by a provider type name.`,
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -251,3 +152,129 @@ func TestParseAbsProviderConfig(t *testing.T) {
|
|||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestAbsProviderConfigString(t *testing.T) {
|
||||
tests := []struct {
|
||||
Config AbsProviderConfig
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
`provider["registry.terraform.io/-/foo"]`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule.Child("child_module"),
|
||||
Provider: NewDefaultProvider("foo"),
|
||||
},
|
||||
`module.child_module.provider["registry.terraform.io/hashicorp/foo"]`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Alias: "bar",
|
||||
Provider: NewDefaultProvider("foo"),
|
||||
},
|
||||
`provider["registry.terraform.io/hashicorp/foo"].bar`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule.Child("child_module"),
|
||||
Alias: "bar",
|
||||
Provider: NewDefaultProvider("foo"),
|
||||
},
|
||||
`module.child_module.provider["registry.terraform.io/hashicorp/foo"].bar`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Config.String()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result. Got %s, want %s\n", got, test.Want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestAbsProviderConfigLegacyString(t *testing.T) {
|
||||
tests := []struct {
|
||||
Config AbsProviderConfig
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
`provider.foo`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule.Child("child_module"),
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
`module.child_module.provider.foo`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Alias: "bar",
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
`provider.foo.bar`,
|
||||
},
|
||||
{
|
||||
AbsProviderConfig{
|
||||
Module: RootModule.Child("child_module"),
|
||||
Alias: "bar",
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
`module.child_module.provider.foo.bar`,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Config.LegacyString()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result. Got %s, want %s\n", got, test.Want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseLegacyAbsProviderConfigStr(t *testing.T) {
|
||||
tests := []struct {
|
||||
Config string
|
||||
Want AbsProviderConfig
|
||||
}{
|
||||
{
|
||||
`provider.foo`,
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
},
|
||||
{
|
||||
`module.child_module.provider.foo`,
|
||||
AbsProviderConfig{
|
||||
Module: RootModule.Child("child_module"),
|
||||
Provider: NewLegacyProvider("foo"),
|
||||
},
|
||||
},
|
||||
{
|
||||
`provider.terraform`,
|
||||
AbsProviderConfig{
|
||||
Module: RootModule,
|
||||
Provider: NewBuiltInProvider("terraform"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got, _ := ParseLegacyAbsProviderConfigStr(test.Config)
|
||||
if !reflect.DeepEqual(got, test.Want) {
|
||||
t.Errorf("wrong result. Got %s, want %s\n", got, test.Want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,560 @@
|
|||
package addrs
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-test/deep"
|
||||
svchost "github.com/hashicorp/terraform-svchost"
|
||||
)
|
||||
|
||||
func TestProviderString(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
NewDefaultProvider("test").String(),
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test-beta",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
NewDefaultProvider("test-beta").String(),
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: "registry.terraform.com",
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
"registry.terraform.com/hashicorp/test",
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "othercorp",
|
||||
},
|
||||
DefaultRegistryHost.ForDisplay() + "/othercorp/test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.String()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\n", test.Input.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderLegacyString(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: LegacyProviderNamespace,
|
||||
},
|
||||
"test",
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "terraform",
|
||||
Hostname: BuiltInProviderHost,
|
||||
Namespace: BuiltInProviderNamespace,
|
||||
},
|
||||
"terraform",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.LegacyString()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\ngot: %s\nwant: %s", test.Input.String(), got, test.Want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderDisplay(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want string
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
"hashicorp/test",
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: "registry.terraform.com",
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
"registry.terraform.com/hashicorp/test",
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "othercorp",
|
||||
},
|
||||
"othercorp/test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.ForDisplay()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\n", test.Input.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderIsDefault(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want bool
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: "registry.terraform.com",
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "othercorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.IsDefault()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\n", test.Input.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderIsBuiltIn(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want bool
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: BuiltInProviderHost,
|
||||
Namespace: BuiltInProviderNamespace,
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "terraform",
|
||||
Hostname: BuiltInProviderHost,
|
||||
Namespace: BuiltInProviderNamespace,
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: BuiltInProviderHost,
|
||||
Namespace: "boop",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: BuiltInProviderNamespace,
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: "registry.terraform.com",
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "othercorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.IsBuiltIn()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\ngot: %#v\nwant: %#v", test.Input.String(), got, test.Want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderIsLegacy(t *testing.T) {
|
||||
tests := []struct {
|
||||
Input Provider
|
||||
Want bool
|
||||
}{
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: LegacyProviderNamespace,
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: "registry.terraform.com",
|
||||
Namespace: LegacyProviderNamespace,
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Provider{
|
||||
Type: "test",
|
||||
Hostname: DefaultRegistryHost,
|
||||
Namespace: "hashicorp",
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
got := test.Input.IsLegacy()
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result for %s\n", test.Input.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseProviderSourceStr(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
Want Provider
|
||||
Err bool
|
||||
}{
|
||||
"registry.terraform.io/hashicorp/aws": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"registry.Terraform.io/HashiCorp/AWS": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"hashicorp/aws": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"HashiCorp/AWS": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"aws": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"AWS": {
|
||||
Provider{
|
||||
Type: "aws",
|
||||
Namespace: "hashicorp",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"example.com/foo-bar/baz-boop": {
|
||||
Provider{
|
||||
Type: "baz-boop",
|
||||
Namespace: "foo-bar",
|
||||
Hostname: svchost.Hostname("example.com"),
|
||||
},
|
||||
false,
|
||||
},
|
||||
"foo-bar/baz-boop": {
|
||||
Provider{
|
||||
Type: "baz-boop",
|
||||
Namespace: "foo-bar",
|
||||
Hostname: DefaultRegistryHost,
|
||||
},
|
||||
false,
|
||||
},
|
||||
"localhost:8080/foo/bar": {
|
||||
Provider{
|
||||
Type: "bar",
|
||||
Namespace: "foo",
|
||||
Hostname: svchost.Hostname("localhost:8080"),
|
||||
},
|
||||
false,
|
||||
},
|
||||
"example.com/too/many/parts/here": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"/too///many//slashes": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"///": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"/ / /": { // empty strings
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"badhost!/hashicorp/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/badnamespace!/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/bad--namespace/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/-badnamespace/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/badnamespace-/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/bad.namespace/aws": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/badtype!": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/bad--type": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/-badtype": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/badtype-": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/bad.type": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
|
||||
// We forbid the terraform- prefix both because it's redundant to
|
||||
// include "terraform" in a Terraform provider name and because we use
|
||||
// the longer prefix terraform-provider- to hint for users who might be
|
||||
// accidentally using the git repository name or executable file name
|
||||
// instead of the provider type.
|
||||
"example.com/hashicorp/terraform-provider-bad": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
"example.com/hashicorp/terraform-bad": {
|
||||
Provider{},
|
||||
true,
|
||||
},
|
||||
}
|
||||
|
||||
for name, test := range tests {
|
||||
got, diags := ParseProviderSourceString(name)
|
||||
for _, problem := range deep.Equal(got, test.Want) {
|
||||
t.Errorf(problem)
|
||||
}
|
||||
if len(diags) > 0 {
|
||||
if test.Err == false {
|
||||
t.Errorf("got error, expected success")
|
||||
}
|
||||
} else {
|
||||
if test.Err {
|
||||
t.Errorf("got success, expected error")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseProviderPart(t *testing.T) {
|
||||
tests := map[string]struct {
|
||||
Want string
|
||||
Error string
|
||||
}{
|
||||
`foo`: {
|
||||
`foo`,
|
||||
``,
|
||||
},
|
||||
`FOO`: {
|
||||
`foo`,
|
||||
``,
|
||||
},
|
||||
`Foo`: {
|
||||
`foo`,
|
||||
``,
|
||||
},
|
||||
`abc-123`: {
|
||||
`abc-123`,
|
||||
``,
|
||||
},
|
||||
`Испытание`: {
|
||||
`испытание`,
|
||||
``,
|
||||
},
|
||||
`münchen`: { // this is a precomposed u with diaeresis
|
||||
`münchen`, // this is a precomposed u with diaeresis
|
||||
``,
|
||||
},
|
||||
`münchen`: { // this is a separate u and combining diaeresis
|
||||
`münchen`, // this is a precomposed u with diaeresis
|
||||
``,
|
||||
},
|
||||
`abc--123`: {
|
||||
``,
|
||||
`cannot use multiple consecutive dashes`,
|
||||
},
|
||||
`xn--80akhbyknj4f`: { // this is the punycode form of "испытание", but we don't accept punycode here
|
||||
``,
|
||||
`cannot use multiple consecutive dashes`,
|
||||
},
|
||||
`abc.123`: {
|
||||
``,
|
||||
`dots are not allowed`,
|
||||
},
|
||||
`-abc123`: {
|
||||
``,
|
||||
`must contain only letters, digits, and dashes, and may not use leading or trailing dashes`,
|
||||
},
|
||||
`abc123-`: {
|
||||
``,
|
||||
`must contain only letters, digits, and dashes, and may not use leading or trailing dashes`,
|
||||
},
|
||||
``: {
|
||||
``,
|
||||
`must have at least one character`,
|
||||
},
|
||||
}
|
||||
|
||||
for given, test := range tests {
|
||||
t.Run(given, func(t *testing.T) {
|
||||
got, err := ParseProviderPart(given)
|
||||
if test.Error != "" {
|
||||
if err == nil {
|
||||
t.Errorf("unexpected success\ngot: %s\nwant: %s", err, test.Error)
|
||||
} else if got := err.Error(); got != test.Error {
|
||||
t.Errorf("wrong error\ngot: %s\nwant: %s", got, test.Error)
|
||||
}
|
||||
} else {
|
||||
if err != nil {
|
||||
t.Errorf("unexpected error\ngot: %s\nwant: <nil>", err)
|
||||
} else if got != test.Want {
|
||||
t.Errorf("wrong result\ngot: %s\nwant: %s", got, test.Want)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderEquals(t *testing.T) {
|
||||
tests := []struct {
|
||||
InputP Provider
|
||||
OtherP Provider
|
||||
Want bool
|
||||
}{
|
||||
{
|
||||
NewProvider(DefaultRegistryHost, "foo", "test"),
|
||||
NewProvider(DefaultRegistryHost, "foo", "test"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
NewProvider(DefaultRegistryHost, "foo", "test"),
|
||||
NewProvider(DefaultRegistryHost, "bar", "test"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
NewProvider(DefaultRegistryHost, "foo", "test"),
|
||||
NewProvider(DefaultRegistryHost, "foo", "my-test"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
NewProvider(DefaultRegistryHost, "foo", "test"),
|
||||
NewProvider("example.com", "foo", "test"),
|
||||
false,
|
||||
},
|
||||
}
|
||||
for _, test := range tests {
|
||||
t.Run(test.InputP.String(), func(t *testing.T) {
|
||||
got := test.InputP.Equals(test.OtherP)
|
||||
if got != test.Want {
|
||||
t.Errorf("wrong result\ngot: %v\nwant: %v", got, test.Want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
|
@ -50,24 +50,24 @@ func (r Resource) Absolute(module ModuleInstance) AbsResource {
|
|||
}
|
||||
}
|
||||
|
||||
// DefaultProviderConfig returns the address of the provider configuration
|
||||
// that should be used for the resource identified by the reciever if it
|
||||
// does not have a provider configuration address explicitly set in
|
||||
// configuration.
|
||||
//
|
||||
// This method is not able to verify that such a configuration exists, nor
|
||||
// represent the behavior of automatically inheriting certain provider
|
||||
// configurations from parent modules. It just does a static analysis of the
|
||||
// receiving address and returns an address to start from, relative to the
|
||||
// same module that contains the resource.
|
||||
func (r Resource) DefaultProviderConfig() ProviderConfig {
|
||||
// InModule returns a ConfigResource from the receiver and the given module
|
||||
// address.
|
||||
func (r Resource) InModule(module Module) ConfigResource {
|
||||
return ConfigResource{
|
||||
Module: module,
|
||||
Resource: r,
|
||||
}
|
||||
}
|
||||
|
||||
// ImpliedProvider returns the implied provider type name, for e.g. the "aws" in
|
||||
// "aws_instance"
|
||||
func (r Resource) ImpliedProvider() string {
|
||||
typeName := r.Type
|
||||
if under := strings.Index(typeName, "_"); under != -1 {
|
||||
typeName = typeName[:under]
|
||||
}
|
||||
return ProviderConfig{
|
||||
Type: typeName,
|
||||
}
|
||||
|
||||
return typeName
|
||||
}
|
||||
|
||||
// ResourceInstance is an address for a specific instance of a resource.
|
||||
|
@ -131,6 +131,14 @@ func (r AbsResource) Instance(key InstanceKey) AbsResourceInstance {
|
|||
}
|
||||
}
|
||||
|
||||
// Config returns the unexpanded ConfigResource for this AbsResource.
|
||||
func (r AbsResource) Config() ConfigResource {
|
||||
return ConfigResource{
|
||||
Module: r.Module.Module(),
|
||||
Resource: r.Resource,
|
||||
}
|
||||
}
|
||||
|
||||
// TargetContains implements Targetable by returning true if the given other
|
||||
// address is either equal to the receiver or is an instance of the
|
||||
// receiver.
|
||||
|
@ -141,6 +149,11 @@ func (r AbsResource) TargetContains(other Targetable) bool {
|
|||
// We'll use our stringification as a cheat-ish way to test for equality.
|
||||
return to.String() == r.String()
|
||||
|
||||
case ConfigResource:
|
||||
// if an absolute resource from parsing a target address contains a
|
||||
// ConfigResource, the string representation will match
|
||||
return to.String() == r.String()
|
||||
|
||||
case AbsResourceInstance:
|
||||
return r.TargetContains(to.ContainingResource())
|
||||
|
||||
|
@ -199,9 +212,15 @@ func (r AbsResourceInstance) ContainingResource() AbsResource {
|
|||
func (r AbsResourceInstance) TargetContains(other Targetable) bool {
|
||||
switch to := other.(type) {
|
||||
|
||||
// while we currently don't start with an AbsResourceInstance as a target
|
||||
// address, check all resource types for consistency.
|
||||
case AbsResourceInstance:
|
||||
// We'll use our stringification as a cheat-ish way to test for equality.
|
||||
return to.String() == r.String()
|
||||
case ConfigResource:
|
||||
return to.String() == r.String()
|
||||
case AbsResource:
|
||||
return to.String() == r.String()
|
||||
|
||||
default:
|
||||
return false
|
||||
|
@ -249,11 +268,66 @@ func (r AbsResourceInstance) Less(o AbsResourceInstance) bool {
|
|||
}
|
||||
}
|
||||
|
||||
// ConfigResource is an address for a resource within a configuration.
|
||||
type ConfigResource struct {
|
||||
targetable
|
||||
Module Module
|
||||
Resource Resource
|
||||
}
|
||||
|
||||
// Resource returns the address of a particular resource within the module.
|
||||
func (m Module) Resource(mode ResourceMode, typeName string, name string) ConfigResource {
|
||||
return ConfigResource{
|
||||
Module: m,
|
||||
Resource: Resource{
|
||||
Mode: mode,
|
||||
Type: typeName,
|
||||
Name: name,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Absolute produces the address for the receiver within a specific module instance.
|
||||
func (r ConfigResource) Absolute(module ModuleInstance) AbsResource {
|
||||
return AbsResource{
|
||||
Module: module,
|
||||
Resource: r.Resource,
|
||||
}
|
||||
}
|
||||
|
||||
// TargetContains implements Targetable by returning true if the given other
|
||||
// address is either equal to the receiver or is an instance of the
|
||||
// receiver.
|
||||
func (r ConfigResource) TargetContains(other Targetable) bool {
|
||||
switch to := other.(type) {
|
||||
case ConfigResource:
|
||||
// We'll use our stringification as a cheat-ish way to test for equality.
|
||||
return to.String() == r.String()
|
||||
case AbsResource:
|
||||
return r.TargetContains(to.Config())
|
||||
case AbsResourceInstance:
|
||||
return r.TargetContains(to.ContainingResource())
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (r ConfigResource) String() string {
|
||||
if len(r.Module) == 0 {
|
||||
return r.Resource.String()
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", r.Module.String(), r.Resource.String())
|
||||
}
|
||||
|
||||
func (r ConfigResource) Equal(o ConfigResource) bool {
|
||||
return r.String() == o.String()
|
||||
}
|
||||
|
||||
// ResourceMode defines which lifecycle applies to a given resource. Each
|
||||
// resource lifecycle has a slightly different address format.
|
||||
type ResourceMode rune
|
||||
|
||||
//go:generate stringer -type ResourceMode
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type ResourceMode
|
||||
|
||||
const (
|
||||
// InvalidResourceMode is the zero value of ResourceMode and is not
|
||||
|
|
|
@ -4,6 +4,15 @@ package addrs
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[InvalidResourceMode-0]
|
||||
_ = x[ManagedResourceMode-77]
|
||||
_ = x[DataResourceMode-68]
|
||||
}
|
||||
|
||||
const (
|
||||
_ResourceMode_name_0 = "InvalidResourceMode"
|
||||
_ResourceMode_name_1 = "DataResourceMode"
|
||||
|
|
|
@ -0,0 +1,231 @@
|
|||
package addrs
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestTargetContains(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
addr, other Targetable
|
||||
expect bool
|
||||
}{
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.bar"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.foo"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
RootModuleInstance,
|
||||
mustParseTarget("module.foo"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
RootModuleInstance,
|
||||
false,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.foo.module.bar[0]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.foo.module.bar[0]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo[2]"),
|
||||
mustParseTarget("module.foo[2].module.bar[0]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.foo.test_resource.bar"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.foo"),
|
||||
mustParseTarget("module.foo.test_resource.bar[0]"),
|
||||
true,
|
||||
},
|
||||
|
||||
// Resources
|
||||
{
|
||||
mustParseTarget("test_resource.foo"),
|
||||
mustParseTarget("test_resource.foo[\"bar\"]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget(`test_resource.foo["bar"]`),
|
||||
mustParseTarget(`test_resource.foo["bar"]`),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("test_resource.foo"),
|
||||
mustParseTarget("test_resource.foo[2]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("test_resource.foo"),
|
||||
mustParseTarget("module.bar.test_resource.foo[2]"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar.test_resource.foo"),
|
||||
mustParseTarget("module.bar.test_resource.foo[2]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar.test_resource.foo"),
|
||||
mustParseTarget("module.bar[0].test_resource.foo[2]"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar.test_resource.foo"),
|
||||
mustParseTarget("module.bar.test_resource.foo[0]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bax"),
|
||||
mustParseTarget("module.bax[0].test_resource.foo[0]"),
|
||||
true,
|
||||
},
|
||||
|
||||
// Config paths, while never returned from parsing a target, must still
|
||||
// be targetable
|
||||
{
|
||||
ConfigResource{
|
||||
Module: []string{"bar"},
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "test_resource",
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
mustParseTarget("module.bar.test_resource.foo[2]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar"),
|
||||
ConfigResource{
|
||||
Module: []string{"bar"},
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "test_resource",
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar.test_resource.foo"),
|
||||
ConfigResource{
|
||||
Module: []string{"bar"},
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "test_resource",
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
true,
|
||||
},
|
||||
{
|
||||
ConfigResource{
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "test_resource",
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
mustParseTarget("module.bar.test_resource.foo[2]"),
|
||||
false,
|
||||
},
|
||||
{
|
||||
ConfigResource{
|
||||
Module: []string{"bar"},
|
||||
Resource: Resource{
|
||||
Mode: ManagedResourceMode,
|
||||
Type: "test_resource",
|
||||
Name: "foo",
|
||||
},
|
||||
},
|
||||
mustParseTarget("module.bar[0].test_resource.foo"),
|
||||
true,
|
||||
},
|
||||
|
||||
// Modules are also never the result of parsing a target, but also need
|
||||
// to be targetable
|
||||
{
|
||||
Module{"bar"},
|
||||
Module{"bar", "baz"},
|
||||
true,
|
||||
},
|
||||
{
|
||||
Module{"bar"},
|
||||
mustParseTarget("module.bar[0]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
// Parsing an ambiguous module path needs to ensure the
|
||||
// ModuleInstance could contain the Module. This is safe because if
|
||||
// the module could be expanded, it must have an index, meaning no
|
||||
// index indicates that the module instance and module are
|
||||
// functionally equivalent.
|
||||
mustParseTarget("module.bar"),
|
||||
Module{"bar"},
|
||||
true,
|
||||
},
|
||||
{
|
||||
// A specific ModuleInstance cannot contain a module
|
||||
mustParseTarget("module.bar[0]"),
|
||||
Module{"bar"},
|
||||
false,
|
||||
},
|
||||
{
|
||||
Module{"bar", "baz"},
|
||||
mustParseTarget("module.bar[0].module.baz.test_resource.foo[1]"),
|
||||
true,
|
||||
},
|
||||
{
|
||||
mustParseTarget("module.bar[0].module.baz"),
|
||||
Module{"bar", "baz"},
|
||||
false,
|
||||
},
|
||||
} {
|
||||
t.Run(fmt.Sprintf("%s-in-%s", test.other, test.addr), func(t *testing.T) {
|
||||
got := test.addr.TargetContains(test.other)
|
||||
if got != test.expect {
|
||||
t.Fatalf("expected %q.TargetContains(%q) == %t", test.addr, test.other, test.expect)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestResourceContains(t *testing.T) {
|
||||
for _, test := range []struct {
|
||||
in, other Targetable
|
||||
expect bool
|
||||
}{} {
|
||||
t.Run(fmt.Sprintf("%s-in-%s", test.other, test.in), func(t *testing.T) {
|
||||
got := test.in.TargetContains(test.other)
|
||||
if got != test.expect {
|
||||
t.Fatalf("expected %q.TargetContains(%q) == %t", test.in, test.other, test.expect)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func mustParseTarget(str string) Targetable {
|
||||
t, diags := ParseTargetStr(str)
|
||||
if diags != nil {
|
||||
panic(fmt.Sprintf("%s: %s", str, diags.ErrWithWarnings()))
|
||||
}
|
||||
return t.Subject
|
||||
}
|
|
@ -7,14 +7,13 @@ import (
|
|||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/mitchellh/colorstring"
|
||||
|
@ -43,9 +42,6 @@ type Backend struct {
|
|||
// stateClient is the legacy state client, setup in Configure
|
||||
stateClient *stateClient
|
||||
|
||||
// schema is the schema for configuration, set by init
|
||||
schema *schema.Backend
|
||||
|
||||
// opLock locks operations
|
||||
opLock sync.Mutex
|
||||
}
|
||||
|
@ -79,7 +75,7 @@ func (b *Backend) ConfigSchema() *configschema.Block {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *Backend) ValidateConfig(obj cty.Value) tfdiags.Diagnostics {
|
||||
func (b *Backend) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
name := obj.GetAttr("name").AsString()
|
||||
|
@ -105,7 +101,7 @@ func (b *Backend) ValidateConfig(obj cty.Value) tfdiags.Diagnostics {
|
|||
}
|
||||
}
|
||||
|
||||
return diags
|
||||
return obj, diags
|
||||
}
|
||||
|
||||
func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
|
||||
|
@ -116,7 +112,7 @@ func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
|
|||
RunId: os.Getenv("ATLAS_RUN_ID"),
|
||||
}
|
||||
|
||||
name := obj.GetAttr("name").AsString() // assumed valid due to ValidateConfig method
|
||||
name := obj.GetAttr("name").AsString() // assumed valid due to PrepareConfig method
|
||||
slashIdx := strings.Index(name, "/")
|
||||
client.User = name[:slashIdx]
|
||||
client.Name = name[slashIdx+1:]
|
||||
|
@ -139,7 +135,7 @@ func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
|
|||
addr := v.AsString()
|
||||
addrURL, err := url.Parse(addr)
|
||||
if err != nil {
|
||||
// We already validated the URL in ValidateConfig, so this shouldn't happen
|
||||
// We already validated the URL in PrepareConfig, so this shouldn't happen
|
||||
panic(err)
|
||||
}
|
||||
client.Server = addr
|
||||
|
@ -175,7 +171,7 @@ func (b *Backend) DeleteWorkspace(name string) error {
|
|||
return backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (state.State, error) {
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
if name != backend.DefaultStateName {
|
||||
return nil, backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ import (
|
|||
"github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/go-retryablehttp"
|
||||
"github.com/hashicorp/go-rootcerts"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
|
|
|
@ -7,6 +7,7 @@ import (
|
|||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
|
@ -19,7 +20,7 @@ import (
|
|||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
||||
|
@ -216,17 +217,20 @@ func TestStateClient_UnresolvableConflict(t *testing.T) {
|
|||
if err := terraform.WriteState(state, &stateJson); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
doneCh := make(chan struct{})
|
||||
errCh := make(chan error)
|
||||
go func() {
|
||||
defer close(doneCh)
|
||||
defer close(errCh)
|
||||
if err := client.Put(stateJson.Bytes()); err == nil {
|
||||
t.Fatal("Expected error from state conflict, got none.")
|
||||
errCh <- errors.New("expected error from state conflict, got none.")
|
||||
return
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-doneCh:
|
||||
// OK
|
||||
case err := <-errCh:
|
||||
if err != nil {
|
||||
t.Fatalf("error from anonymous test goroutine: %s", err)
|
||||
}
|
||||
case <-time.After(500 * time.Millisecond):
|
||||
t.Fatalf("Timed out after 500ms, probably because retrying infinitely.")
|
||||
}
|
||||
|
|
|
@ -7,6 +7,8 @@ package backend
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
|
@ -20,6 +22,7 @@ import (
|
|||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/mitchellh/go-homedir"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
|
@ -34,10 +37,6 @@ var (
|
|||
ErrDefaultWorkspaceNotSupported = errors.New("default workspace not supported\n" +
|
||||
"You can create a new workspace with the \"workspace new\" command.")
|
||||
|
||||
// ErrOperationNotSupported is returned when an unsupported operation
|
||||
// is detected by the configured backend.
|
||||
ErrOperationNotSupported = errors.New("operation not supported")
|
||||
|
||||
// ErrWorkspacesNotSupported is an error returned when a caller attempts
|
||||
// to perform an operation on a workspace other than "default" for a
|
||||
// backend that doesn't support multiple workspaces.
|
||||
|
@ -59,9 +58,10 @@ type Backend interface {
|
|||
// be safely used before configuring.
|
||||
ConfigSchema() *configschema.Block
|
||||
|
||||
// ValidateConfig checks the validity of the values in the given
|
||||
// configuration, assuming that its structure has already been validated
|
||||
// per the schema returned by ConfigSchema.
|
||||
// PrepareConfig checks the validity of the values in the given
|
||||
// configuration, and inserts any missing defaults, assuming that its
|
||||
// structure has already been validated per the schema returned by
|
||||
// ConfigSchema.
|
||||
//
|
||||
// This method does not have any side-effects for the backend and can
|
||||
// be safely used before configuring. It also does not consult any
|
||||
|
@ -76,14 +76,14 @@ type Backend interface {
|
|||
// as tfdiags.AttributeValue, and so the caller should provide the
|
||||
// necessary context via the diags.InConfigBody method before returning
|
||||
// diagnostics to the user.
|
||||
ValidateConfig(cty.Value) tfdiags.Diagnostics
|
||||
PrepareConfig(cty.Value) (cty.Value, tfdiags.Diagnostics)
|
||||
|
||||
// Configure uses the provided configuration to set configuration fields
|
||||
// within the backend.
|
||||
//
|
||||
// The given configuration is assumed to have already been validated
|
||||
// against the schema returned by ConfigSchema and passed validation
|
||||
// via ValidateConfig.
|
||||
// via PrepareConfig.
|
||||
//
|
||||
// This method may be called only once per backend instance, and must be
|
||||
// called before all other methods except where otherwise stated.
|
||||
|
@ -195,12 +195,22 @@ type Operation struct {
|
|||
Targets []addrs.Targetable
|
||||
Variables map[string]UnparsedVariableValue
|
||||
|
||||
// Some operations use root module variables only opportunistically or
|
||||
// don't need them at all. If this flag is set, the backend must treat
|
||||
// all variables as optional and provide an unknown value for any required
|
||||
// variables that aren't set in order to allow partial evaluation against
|
||||
// the resulting incomplete context.
|
||||
//
|
||||
// This flag is honored only if PlanFile isn't set. If PlanFile is set then
|
||||
// the variables set in the plan are used instead, and they must be valid.
|
||||
AllowUnsetVariables bool
|
||||
|
||||
// Input/output/control options.
|
||||
UIIn terraform.UIInput
|
||||
UIOut terraform.UIOutput
|
||||
|
||||
// If LockState is true, the Operation must Lock any
|
||||
// state.Lockers for its duration, and Unlock when complete.
|
||||
// statemgr.Lockers for its duration, and Unlock when complete.
|
||||
LockState bool
|
||||
|
||||
// StateLocker is used to lock the state while providing UI feedback to the
|
||||
|
@ -280,3 +290,31 @@ const (
|
|||
func (r OperationResult) ExitStatus() int {
|
||||
return int(r)
|
||||
}
|
||||
|
||||
// If the argument is a path, Read loads it and returns the contents,
|
||||
// otherwise the argument is assumed to be the desired contents and is simply
|
||||
// returned.
|
||||
func ReadPathOrContents(poc string) (string, error) {
|
||||
if len(poc) == 0 {
|
||||
return poc, nil
|
||||
}
|
||||
|
||||
path := poc
|
||||
if path[0] == '~' {
|
||||
var err error
|
||||
path, err = homedir.Expand(path)
|
||||
if err != nil {
|
||||
return path, err
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
contents, err := ioutil.ReadFile(path)
|
||||
if err != nil {
|
||||
return string(contents), err
|
||||
}
|
||||
return string(contents), nil
|
||||
}
|
||||
|
||||
return poc, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,129 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/user"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
func TestReadPathOrContents_Path(t *testing.T) {
|
||||
f, cleanup := testTempFile(t)
|
||||
defer cleanup()
|
||||
|
||||
if _, err := io.WriteString(f, "foobar"); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
contents, err := ReadPathOrContents(f.Name())
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if contents != "foobar" {
|
||||
t.Fatalf("expected contents %s, got %s", "foobar", contents)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPathOrContents_TildePath(t *testing.T) {
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
f, cleanup := testTempFile(t, home)
|
||||
defer cleanup()
|
||||
|
||||
if _, err := io.WriteString(f, "foobar"); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
r := strings.NewReplacer(home, "~")
|
||||
homePath := r.Replace(f.Name())
|
||||
contents, err := ReadPathOrContents(homePath)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if contents != "foobar" {
|
||||
t.Fatalf("expected contents %s, got %s", "foobar", contents)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRead_PathNoPermission(t *testing.T) {
|
||||
// This skip condition is intended to get this test out of the way of users
|
||||
// who are building and testing Terraform from within a Linux-based Docker
|
||||
// container, where it is common for processes to be running as effectively
|
||||
// root within the container.
|
||||
if u, err := user.Current(); err == nil && u.Uid == "0" {
|
||||
t.Skip("This test is invalid when running as root, since root can read every file")
|
||||
}
|
||||
|
||||
f, cleanup := testTempFile(t)
|
||||
defer cleanup()
|
||||
|
||||
if _, err := io.WriteString(f, "foobar"); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
f.Close()
|
||||
|
||||
if err := os.Chmod(f.Name(), 0); err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
contents, err := ReadPathOrContents(f.Name())
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("Expected error, got none!")
|
||||
}
|
||||
if contents != "" {
|
||||
t.Fatalf("expected contents %s, got %s", "", contents)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPathOrContents_Contents(t *testing.T) {
|
||||
input := "hello"
|
||||
|
||||
contents, err := ReadPathOrContents(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if contents != input {
|
||||
t.Fatalf("expected contents %s, got %s", input, contents)
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadPathOrContents_TildeContents(t *testing.T) {
|
||||
input := "~/hello/notafile"
|
||||
|
||||
contents, err := ReadPathOrContents(input)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
if contents != input {
|
||||
t.Fatalf("expected contents %s, got %s", input, contents)
|
||||
}
|
||||
}
|
||||
|
||||
// Returns an open tempfile based at baseDir and a function to clean it up.
|
||||
func testTempFile(t *testing.T, baseDir ...string) (*os.File, func()) {
|
||||
base := ""
|
||||
if len(baseDir) == 1 {
|
||||
base = baseDir[0]
|
||||
}
|
||||
f, err := ioutil.TempFile(base, "tf")
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
return f, func() {
|
||||
os.Remove(f.Name())
|
||||
}
|
||||
}
|
|
@ -14,7 +14,7 @@ func TestDeprecateBackend(t *testing.T) {
|
|||
deprecateMessage,
|
||||
)
|
||||
|
||||
diags := deprecatedBackend.ValidateConfig(cty.EmptyObjectVal)
|
||||
_, diags := deprecatedBackend.PrepareConfig(cty.EmptyObjectVal)
|
||||
if len(diags) != 1 {
|
||||
t.Errorf("got %d diagnostics; want 1", len(diags))
|
||||
for _, diag := range diags {
|
||||
|
|
|
@ -5,8 +5,8 @@ package init
|
|||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/terraform-svchost/disco"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/svchost/disco"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
|
@ -16,12 +16,16 @@ import (
|
|||
backendArtifactory "github.com/hashicorp/terraform/backend/remote-state/artifactory"
|
||||
backendAzure "github.com/hashicorp/terraform/backend/remote-state/azure"
|
||||
backendConsul "github.com/hashicorp/terraform/backend/remote-state/consul"
|
||||
backendCos "github.com/hashicorp/terraform/backend/remote-state/cos"
|
||||
backendEtcdv2 "github.com/hashicorp/terraform/backend/remote-state/etcdv2"
|
||||
backendEtcdv3 "github.com/hashicorp/terraform/backend/remote-state/etcdv3"
|
||||
backendGCS "github.com/hashicorp/terraform/backend/remote-state/gcs"
|
||||
backendHTTP "github.com/hashicorp/terraform/backend/remote-state/http"
|
||||
backendInmem "github.com/hashicorp/terraform/backend/remote-state/inmem"
|
||||
backendKubernetes "github.com/hashicorp/terraform/backend/remote-state/kubernetes"
|
||||
backendManta "github.com/hashicorp/terraform/backend/remote-state/manta"
|
||||
backendOSS "github.com/hashicorp/terraform/backend/remote-state/oss"
|
||||
backendPg "github.com/hashicorp/terraform/backend/remote-state/pg"
|
||||
backendS3 "github.com/hashicorp/terraform/backend/remote-state/s3"
|
||||
backendSwift "github.com/hashicorp/terraform/backend/remote-state/swift"
|
||||
)
|
||||
|
@ -55,12 +59,16 @@ func Init(services *disco.Disco) {
|
|||
"atlas": func() backend.Backend { return backendAtlas.New() },
|
||||
"azurerm": func() backend.Backend { return backendAzure.New() },
|
||||
"consul": func() backend.Backend { return backendConsul.New() },
|
||||
"cos": func() backend.Backend { return backendCos.New() },
|
||||
"etcd": func() backend.Backend { return backendEtcdv2.New() },
|
||||
"etcdv3": func() backend.Backend { return backendEtcdv3.New() },
|
||||
"gcs": func() backend.Backend { return backendGCS.New() },
|
||||
"http": func() backend.Backend { return backendHTTP.New() },
|
||||
"inmem": func() backend.Backend { return backendInmem.New() },
|
||||
"kubernetes": func() backend.Backend { return backendKubernetes.New() },
|
||||
"manta": func() backend.Backend { return backendManta.New() },
|
||||
"oss": func() backend.Backend { return backendOSS.New() },
|
||||
"pg": func() backend.Backend { return backendPg.New() },
|
||||
"s3": func() backend.Backend { return backendS3.New() },
|
||||
"swift": func() backend.Backend { return backendSwift.New() },
|
||||
|
||||
|
@ -108,11 +116,11 @@ type deprecatedBackendShim struct {
|
|||
Message string
|
||||
}
|
||||
|
||||
// ValidateConfig delegates to the wrapped backend to validate its config
|
||||
// PrepareConfig delegates to the wrapped backend to validate its config
|
||||
// and then appends shim's deprecation warning.
|
||||
func (b deprecatedBackendShim) ValidateConfig(obj cty.Value) tfdiags.Diagnostics {
|
||||
diags := b.Backend.ValidateConfig(obj)
|
||||
return diags.Append(tfdiags.SimpleWarning(b.Message))
|
||||
func (b deprecatedBackendShim) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
|
||||
newObj, diags := b.Backend.PrepareConfig(obj)
|
||||
return newObj, diags.Append(tfdiags.SimpleWarning(b.Message))
|
||||
}
|
||||
|
||||
// DeprecateBackend can be used to wrap a backend to retrun a deprecation
|
||||
|
|
|
@ -18,10 +18,12 @@ func TestInit_backend(t *testing.T) {
|
|||
{"atlas", "*atlas.Backend"},
|
||||
{"azurerm", "*azure.Backend"},
|
||||
{"consul", "*consul.Backend"},
|
||||
{"cos", "*cos.Backend"},
|
||||
{"etcdv3", "*etcd.Backend"},
|
||||
{"gcs", "*gcs.Backend"},
|
||||
{"inmem", "*inmem.Backend"},
|
||||
{"manta", "*manta.Backend"},
|
||||
{"pg", "*pg.Backend"},
|
||||
{"s3", "*s3.Backend"},
|
||||
{"swift", "*swift.Backend"},
|
||||
{"azure", "init.deprecatedBackendShim"},
|
||||
|
|
|
@ -9,13 +9,11 @@ import (
|
|||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/command/clistate"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
|
@ -140,9 +138,9 @@ func (b *Local) ConfigSchema() *configschema.Block {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *Local) ValidateConfig(obj cty.Value) tfdiags.Diagnostics {
|
||||
func (b *Local) PrepareConfig(obj cty.Value) (cty.Value, tfdiags.Diagnostics) {
|
||||
if b.Backend != nil {
|
||||
return b.Backend.ValidateConfig(obj)
|
||||
return b.Backend.PrepareConfig(obj)
|
||||
}
|
||||
|
||||
var diags tfdiags.Diagnostics
|
||||
|
@ -171,7 +169,7 @@ func (b *Local) ValidateConfig(obj cty.Value) tfdiags.Diagnostics {
|
|||
}
|
||||
}
|
||||
|
||||
return diags
|
||||
return obj, diags
|
||||
}
|
||||
|
||||
func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics {
|
||||
|
@ -181,11 +179,6 @@ func (b *Local) Configure(obj cty.Value) tfdiags.Diagnostics {
|
|||
|
||||
var diags tfdiags.Diagnostics
|
||||
|
||||
type Config struct {
|
||||
Path string `hcl:"path,optional"`
|
||||
WorkspaceDir string `hcl:"workspace_dir,optional"`
|
||||
}
|
||||
|
||||
if val := obj.GetAttr("path"); !val.IsNull() {
|
||||
p := val.AsString()
|
||||
b.StatePath = p
|
||||
|
@ -343,16 +336,6 @@ func (b *Local) Operation(ctx context.Context, op *backend.Operation) (*backend.
|
|||
defer stop()
|
||||
defer cancel()
|
||||
|
||||
// the state was locked during context creation, unlock the state when
|
||||
// the operation completes
|
||||
defer func() {
|
||||
err := op.StateLocker.Unlock(nil)
|
||||
if err != nil {
|
||||
b.ShowDiagnostics(err)
|
||||
runningOp.Result = backend.OperationFailure
|
||||
}
|
||||
}()
|
||||
|
||||
defer b.opLock.Unlock()
|
||||
f(stopCtx, cancelCtx, op, runningOp)
|
||||
}()
|
||||
|
@ -444,7 +427,7 @@ func (b *Local) ReportResult(op *backend.RunningOperation, diags tfdiags.Diagnos
|
|||
}
|
||||
|
||||
// Colorize returns the Colorize structure that can be used for colorizing
|
||||
// output. This is gauranteed to always return a non-nil value and so is useful
|
||||
// output. This is guaranteed to always return a non-nil value and so is useful
|
||||
// as a helper to wrap any potentially colored strings.
|
||||
func (b *Local) Colorize() *colorstring.Colorize {
|
||||
if b.CLIColor != nil {
|
||||
|
@ -457,39 +440,6 @@ func (b *Local) Colorize() *colorstring.Colorize {
|
|||
}
|
||||
}
|
||||
|
||||
func (b *Local) schemaConfigure(ctx context.Context) error {
|
||||
d := schema.FromContextBackendConfig(ctx)
|
||||
|
||||
// Set the path if it is set
|
||||
pathRaw, ok := d.GetOk("path")
|
||||
if ok {
|
||||
path := pathRaw.(string)
|
||||
if path == "" {
|
||||
return fmt.Errorf("configured path is empty")
|
||||
}
|
||||
|
||||
b.StatePath = path
|
||||
b.StateOutPath = path
|
||||
}
|
||||
|
||||
if raw, ok := d.GetOk("workspace_dir"); ok {
|
||||
path := raw.(string)
|
||||
if path != "" {
|
||||
b.StateWorkspaceDir = path
|
||||
}
|
||||
}
|
||||
|
||||
// Legacy name, which ConflictsWith workspace_dir
|
||||
if raw, ok := d.GetOk("environment_dir"); ok {
|
||||
path := raw.(string)
|
||||
if path != "" {
|
||||
b.StateWorkspaceDir = path
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// StatePaths returns the StatePath, StateOutPath, and StateBackupPath as
|
||||
// configured from the CLI.
|
||||
func (b *Local) StatePaths(name string) (stateIn, stateOut, backupOut string) {
|
||||
|
@ -594,25 +544,3 @@ func (b *Local) stateWorkspaceDir() string {
|
|||
|
||||
return DefaultWorkspaceDir
|
||||
}
|
||||
|
||||
func (b *Local) pluginInitRequired(providerErr *terraform.ResourceProviderError) {
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
|
||||
strings.TrimSpace(errPluginInit)+"\n",
|
||||
providerErr)))
|
||||
}
|
||||
|
||||
// this relies on multierror to format the plugin errors below the copy
|
||||
const errPluginInit = `
|
||||
[reset][bold][yellow]Plugin reinitialization required. Please run "terraform init".[reset]
|
||||
[yellow]Reason: Could not satisfy plugin requirements.
|
||||
|
||||
Plugins are external binaries that Terraform uses to access and manipulate
|
||||
resources. The configuration provided requires plugins which can't be located,
|
||||
don't satisfy the version constraints, or are otherwise incompatible.
|
||||
|
||||
[reset][red]%s
|
||||
|
||||
[reset][yellow]Terraform automatically discovers provider requirements from your
|
||||
configuration, including providers used in child modules. To see the
|
||||
requirements and constraints from each module, run "terraform providers".
|
||||
`
|
||||
|
|
|
@ -56,29 +56,25 @@ func (b *Local) opApply(
|
|||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
}
|
||||
// the state was locked during succesfull context creation; unlock the state
|
||||
// when the operation completes
|
||||
defer func() {
|
||||
err := op.StateLocker.Unlock(nil)
|
||||
if err != nil {
|
||||
b.ShowDiagnostics(err)
|
||||
runningOp.Result = backend.OperationFailure
|
||||
}
|
||||
}()
|
||||
|
||||
// Setup the state
|
||||
runningOp.State = tfCtx.State()
|
||||
|
||||
// If we weren't given a plan, then we refresh/plan
|
||||
if op.PlanFile == nil {
|
||||
// If we're refreshing before apply, perform that
|
||||
if op.PlanRefresh {
|
||||
log.Printf("[INFO] backend/local: apply calling Refresh")
|
||||
_, err := tfCtx.Refresh()
|
||||
if err != nil {
|
||||
diags = diags.Append(err)
|
||||
runningOp.Result = backend.OperationFailure
|
||||
b.ShowDiagnostics(diags)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Perform the plan
|
||||
log.Printf("[INFO] backend/local: apply calling Plan")
|
||||
plan, err := tfCtx.Plan()
|
||||
if err != nil {
|
||||
diags = diags.Append(err)
|
||||
plan, planDiags := tfCtx.Plan()
|
||||
diags = diags.Append(planDiags)
|
||||
if planDiags.HasErrors() {
|
||||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
}
|
||||
|
@ -108,11 +104,18 @@ func (b *Local) opApply(
|
|||
|
||||
if !trivialPlan {
|
||||
// Display the plan of what we are going to apply/destroy.
|
||||
b.renderPlan(plan, tfCtx.Schemas())
|
||||
b.renderPlan(plan, runningOp.State, tfCtx.Schemas())
|
||||
b.CLI.Output("")
|
||||
}
|
||||
|
||||
v, err := op.UIIn.Input(&terraform.InputOpts{
|
||||
// We'll show any accumulated warnings before we display the prompt,
|
||||
// so the user can consider them when deciding how to answer.
|
||||
if len(diags) > 0 {
|
||||
b.ShowDiagnostics(diags)
|
||||
diags = nil // reset so we won't show the same diagnostics again later
|
||||
}
|
||||
|
||||
v, err := op.UIIn.Input(stopCtx, &terraform.InputOpts{
|
||||
Id: "approve",
|
||||
Query: query,
|
||||
Description: desc,
|
||||
|
@ -157,7 +160,15 @@ func (b *Local) opApply(
|
|||
runningOp.State = applyState
|
||||
err := statemgr.WriteAndPersist(opState, applyState)
|
||||
if err != nil {
|
||||
diags = diags.Append(b.backupStateForError(applyState, err))
|
||||
// Export the state file from the state manager and assign the new
|
||||
// state. This is needed to preserve the existing serial and lineage.
|
||||
stateFile := statemgr.Export(opState)
|
||||
if stateFile == nil {
|
||||
stateFile = &statefile.File{}
|
||||
}
|
||||
stateFile.State = applyState
|
||||
|
||||
diags = diags.Append(b.backupStateForError(stateFile, err))
|
||||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
}
|
||||
|
@ -208,11 +219,11 @@ func (b *Local) opApply(
|
|||
// to local disk to help the user recover. This is a "last ditch effort" sort
|
||||
// of thing, so we really don't want to end up in this codepath; we should do
|
||||
// everything we possibly can to get the state saved _somewhere_.
|
||||
func (b *Local) backupStateForError(applyState *states.State, err error) error {
|
||||
func (b *Local) backupStateForError(stateFile *statefile.File, err error) error {
|
||||
b.CLI.Error(fmt.Sprintf("Failed to save state: %s\n", err))
|
||||
|
||||
local := statemgr.NewFilesystem("errored.tfstate")
|
||||
writeErr := local.WriteState(applyState)
|
||||
writeErr := local.WriteStateForMigration(stateFile, true)
|
||||
if writeErr != nil {
|
||||
b.CLI.Error(fmt.Sprintf(
|
||||
"Also failed to create local state file for recovery: %s\n\n", writeErr,
|
||||
|
@ -223,9 +234,6 @@ func (b *Local) backupStateForError(applyState *states.State, err error) error {
|
|||
// but at least the user has _some_ path to recover if we end up
|
||||
// here for some reason.
|
||||
stateBuf := new(bytes.Buffer)
|
||||
stateFile := &statefile.File{
|
||||
State: applyState,
|
||||
}
|
||||
jsonErr := statefile.Write(stateFile, stateBuf)
|
||||
if jsonErr != nil {
|
||||
b.CLI.Error(fmt.Sprintf(
|
||||
|
|
|
@ -32,7 +32,7 @@ func TestLocal_applyBasic(t *testing.T) {
|
|||
"ami": cty.StringVal("bar"),
|
||||
})}
|
||||
|
||||
op, configCleanup := testOperationApply(t, "./test-fixtures/apply")
|
||||
op, configCleanup := testOperationApply(t, "./testdata/apply")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -59,9 +59,10 @@ func TestLocal_applyBasic(t *testing.T) {
|
|||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
ami = bar
|
||||
`)
|
||||
|
||||
}
|
||||
|
||||
func TestLocal_applyEmptyDir(t *testing.T) {
|
||||
|
@ -71,7 +72,7 @@ func TestLocal_applyEmptyDir(t *testing.T) {
|
|||
p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
|
||||
p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{NewState: cty.ObjectVal(map[string]cty.Value{"id": cty.StringVal("yes")})}
|
||||
|
||||
op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
|
||||
op, configCleanup := testOperationApply(t, "./testdata/empty")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -90,6 +91,9 @@ func TestLocal_applyEmptyDir(t *testing.T) {
|
|||
if _, err := os.Stat(b.StateOutPath); err == nil {
|
||||
t.Fatal("should not exist")
|
||||
}
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocal_applyEmptyDirDestroy(t *testing.T) {
|
||||
|
@ -99,7 +103,7 @@ func TestLocal_applyEmptyDirDestroy(t *testing.T) {
|
|||
p := TestLocalProvider(t, b, "test", &terraform.ProviderSchema{})
|
||||
p.ApplyResourceChangeResponse = providers.ApplyResourceChangeResponse{}
|
||||
|
||||
op, configCleanup := testOperationApply(t, "./test-fixtures/empty")
|
||||
op, configCleanup := testOperationApply(t, "./testdata/empty")
|
||||
defer configCleanup()
|
||||
op.Destroy = true
|
||||
|
||||
|
@ -123,8 +127,7 @@ func TestLocal_applyError(t *testing.T) {
|
|||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
p := TestLocalProvider(t, b, "test", nil)
|
||||
p.GetSchemaReturn = &terraform.ProviderSchema{
|
||||
schema := &terraform.ProviderSchema{
|
||||
ResourceTypes: map[string]*configschema.Block{
|
||||
"test_instance": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
|
@ -134,6 +137,7 @@ func TestLocal_applyError(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
p := TestLocalProvider(t, b, "test", schema)
|
||||
|
||||
var lock sync.Mutex
|
||||
errored := false
|
||||
|
@ -161,7 +165,7 @@ func TestLocal_applyError(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
op, configCleanup := testOperationApply(t, "./test-fixtures/apply-error")
|
||||
op, configCleanup := testOperationApply(t, "./testdata/apply-error")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -176,9 +180,12 @@ func TestLocal_applyError(t *testing.T) {
|
|||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = foo
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
ami = bar
|
||||
`)
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocal_applyBackendFail(t *testing.T) {
|
||||
|
@ -201,7 +208,7 @@ func TestLocal_applyBackendFail(t *testing.T) {
|
|||
}
|
||||
defer os.Chdir(wd)
|
||||
|
||||
op, configCleanup := testOperationApply(t, wd+"/test-fixtures/apply")
|
||||
op, configCleanup := testOperationApply(t, wd+"/testdata/apply")
|
||||
defer configCleanup()
|
||||
|
||||
b.Backend = &backendWithFailingState{}
|
||||
|
@ -226,9 +233,12 @@ func TestLocal_applyBackendFail(t *testing.T) {
|
|||
checkState(t, "errored.tfstate", `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
ami = bar
|
||||
`)
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
type backendWithFailingState struct {
|
||||
|
@ -261,28 +271,8 @@ func testOperationApply(t *testing.T, configDir string) (*backend.Operation, fun
|
|||
}, configCleanup
|
||||
}
|
||||
|
||||
// testApplyState is just a common state that we use for testing refresh.
|
||||
func testApplyState() *terraform.State {
|
||||
return &terraform.State{
|
||||
Version: 2,
|
||||
Modules: []*terraform.ModuleState{
|
||||
&terraform.ModuleState{
|
||||
Path: []string{"root"},
|
||||
Resources: map[string]*terraform.ResourceState{
|
||||
"test_instance.foo": &terraform.ResourceState{
|
||||
Type: "test_instance",
|
||||
Primary: &terraform.InstanceState{
|
||||
ID: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// applyFixtureSchema returns a schema suitable for processing the
|
||||
// configuration in test-fixtures/apply . This schema should be
|
||||
// configuration in testdata/apply . This schema should be
|
||||
// assigned to a mock provider named "test".
|
||||
func applyFixtureSchema() *terraform.ProviderSchema {
|
||||
return &terraform.ProviderSchema{
|
||||
|
|
|
@ -4,10 +4,12 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"sort"
|
||||
|
||||
"github.com/hashicorp/errwrap"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/command/clistate"
|
||||
"github.com/hashicorp/terraform/configs"
|
||||
"github.com/hashicorp/terraform/configs/configload"
|
||||
"github.com/hashicorp/terraform/plans/planfile"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
|
@ -47,6 +49,18 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload.
|
|||
diags = diags.Append(errwrap.Wrapf("Error locking state: {{err}}", err))
|
||||
return nil, nil, nil, diags
|
||||
}
|
||||
|
||||
defer func() {
|
||||
// If we're returning with errors, and thus not producing a valid
|
||||
// context, we'll want to avoid leaving the workspace locked.
|
||||
if diags.HasErrors() {
|
||||
err := op.StateLocker.Unlock(nil)
|
||||
if err != nil {
|
||||
diags = diags.Append(errwrap.Wrapf("Error unlocking state: {{err}}", err))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
log.Printf("[TRACE] backend/local: reading remote state for workspace %q", op.Workspace)
|
||||
if err := s.RefreshState(); err != nil {
|
||||
diags = diags.Append(errwrap.Wrapf("Error loading state: {{err}}", err))
|
||||
|
@ -64,6 +78,11 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload.
|
|||
opts.Targets = op.Targets
|
||||
opts.UIInput = op.UIIn
|
||||
|
||||
opts.SkipRefresh = op.Type == backend.OperationTypePlan && !op.PlanRefresh
|
||||
if opts.SkipRefresh {
|
||||
log.Printf("[DEBUG] backend/local: skipping refresh of managed resources")
|
||||
}
|
||||
|
||||
// Load the latest state. If we enter contextFromPlanFile below then the
|
||||
// state snapshot in the plan file must match this, or else it'll return
|
||||
// error diagnostics.
|
||||
|
@ -103,8 +122,6 @@ func (b *Local) context(op *backend.Operation) (*terraform.Context, *configload.
|
|||
// If input asking is enabled, then do that
|
||||
if op.PlanFile == nil && b.OpInput {
|
||||
mode := terraform.InputModeProvider
|
||||
mode |= terraform.InputModeVar
|
||||
mode |= terraform.InputModeVarUnset
|
||||
|
||||
log.Printf("[TRACE] backend/local: requesting interactive input, if necessary")
|
||||
inputDiags := tfCtx.Input(mode)
|
||||
|
@ -136,14 +153,27 @@ func (b *Local) contextDirect(op *backend.Operation, opts terraform.ContextOpts)
|
|||
}
|
||||
opts.Config = config
|
||||
|
||||
variables, varDiags := backend.ParseVariableValues(op.Variables, config.Module.Variables)
|
||||
var rawVariables map[string]backend.UnparsedVariableValue
|
||||
if op.AllowUnsetVariables {
|
||||
// Rather than prompting for input, we'll just stub out the required
|
||||
// but unset variables with unknown values to represent that they are
|
||||
// placeholders for values the user would need to provide for other
|
||||
// operations.
|
||||
rawVariables = b.stubUnsetRequiredVariables(op.Variables, config.Module.Variables)
|
||||
} else {
|
||||
// If interactive input is enabled, we might gather some more variable
|
||||
// values through interactive prompts.
|
||||
// TODO: Need to route the operation context through into here, so that
|
||||
// the interactive prompts can be sensitive to its timeouts/etc.
|
||||
rawVariables = b.interactiveCollectVariables(context.TODO(), op.Variables, config.Module.Variables, opts.UIInput)
|
||||
}
|
||||
|
||||
variables, varDiags := backend.ParseVariableValues(rawVariables, config.Module.Variables)
|
||||
diags = diags.Append(varDiags)
|
||||
if diags.HasErrors() {
|
||||
return nil, nil, diags
|
||||
}
|
||||
if op.Variables != nil {
|
||||
opts.Variables = variables
|
||||
}
|
||||
opts.Variables = variables
|
||||
|
||||
tfCtx, ctxDiags := terraform.NewContext(&opts)
|
||||
diags = diags.Append(ctxDiags)
|
||||
|
@ -190,7 +220,7 @@ func (b *Local) contextFromPlanFile(pf *planfile.Reader, opts terraform.ContextO
|
|||
// If the caller sets this, we require that the stored prior state
|
||||
// has the same metadata, which is an extra safety check that nothing
|
||||
// has changed since the plan was created. (All of the "real-world"
|
||||
// state manager implementstions support this, but simpler test backends
|
||||
// state manager implementations support this, but simpler test backends
|
||||
// may not.)
|
||||
if currentStateMeta.Lineage != "" && priorStateFile.Lineage != "" {
|
||||
if priorStateFile.Serial != currentStateMeta.Serial || priorStateFile.Lineage != currentStateMeta.Lineage {
|
||||
|
@ -245,10 +275,155 @@ func (b *Local) contextFromPlanFile(pf *planfile.Reader, opts terraform.ContextO
|
|||
return tfCtx, snap, diags
|
||||
}
|
||||
|
||||
const validateWarnHeader = `
|
||||
There are warnings related to your configuration. If no errors occurred,
|
||||
Terraform will continue despite these warnings. It is a good idea to resolve
|
||||
these warnings in the near future.
|
||||
// interactiveCollectVariables attempts to complete the given existing
|
||||
// map of variables by interactively prompting for any variables that are
|
||||
// declared as required but not yet present.
|
||||
//
|
||||
// If interactive input is disabled for this backend instance then this is
|
||||
// a no-op. If input is enabled but fails for some reason, the resulting
|
||||
// map will be incomplete. For these reasons, the caller must still validate
|
||||
// that the result is complete and valid.
|
||||
//
|
||||
// This function does not modify the map given in "existing", but may return
|
||||
// it unchanged if no modifications are required. If modifications are required,
|
||||
// the result is a new map with all of the elements from "existing" plus
|
||||
// additional elements as appropriate.
|
||||
//
|
||||
// Interactive prompting is a "best effort" thing for first-time user UX and
|
||||
// not something we expect folks to be relying on for routine use. Terraform
|
||||
// is primarily a non-interactive tool and so we prefer to report in error
|
||||
// messages that variables are not set rather than reporting that input failed:
|
||||
// the primary resolution to missing variables is to provide them by some other
|
||||
// means.
|
||||
func (b *Local) interactiveCollectVariables(ctx context.Context, existing map[string]backend.UnparsedVariableValue, vcs map[string]*configs.Variable, uiInput terraform.UIInput) map[string]backend.UnparsedVariableValue {
|
||||
var needed []string
|
||||
if b.OpInput && uiInput != nil {
|
||||
for name, vc := range vcs {
|
||||
if !vc.Required() {
|
||||
continue // We only prompt for required variables
|
||||
}
|
||||
if _, exists := existing[name]; !exists {
|
||||
needed = append(needed, name)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
log.Print("[DEBUG] backend/local: Skipping interactive prompts for variables because input is disabled")
|
||||
}
|
||||
if len(needed) == 0 {
|
||||
return existing
|
||||
}
|
||||
|
||||
Warnings:
|
||||
`
|
||||
log.Printf("[DEBUG] backend/local: will prompt for input of unset required variables %s", needed)
|
||||
|
||||
// If we get here then we're planning to prompt for at least one additional
|
||||
// variable's value.
|
||||
sort.Strings(needed) // prompt in lexical order
|
||||
ret := make(map[string]backend.UnparsedVariableValue, len(vcs))
|
||||
for k, v := range existing {
|
||||
ret[k] = v
|
||||
}
|
||||
for _, name := range needed {
|
||||
vc := vcs[name]
|
||||
rawValue, err := uiInput.Input(ctx, &terraform.InputOpts{
|
||||
Id: fmt.Sprintf("var.%s", name),
|
||||
Query: fmt.Sprintf("var.%s", name),
|
||||
Description: vc.Description,
|
||||
})
|
||||
if err != nil {
|
||||
// Since interactive prompts are best-effort, we'll just continue
|
||||
// here and let subsequent validation report this as a variable
|
||||
// not specified.
|
||||
log.Printf("[WARN] backend/local: Failed to request user input for variable %q: %s", name, err)
|
||||
continue
|
||||
}
|
||||
ret[name] = unparsedInteractiveVariableValue{Name: name, RawValue: rawValue}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
// stubUnsetVariables ensures that all required variables defined in the
|
||||
// configuration exist in the resulting map, by adding new elements as necessary.
|
||||
//
|
||||
// The stubbed value of any additions will be an unknown variable conforming
|
||||
// to the variable's configured type constraint, meaning that no particular
|
||||
// value is known and that one must be provided by the user in order to get
|
||||
// a complete result.
|
||||
//
|
||||
// Unset optional attributes (those with default values) will not be populated
|
||||
// by this function, under the assumption that a later step will handle those.
|
||||
// In this sense, stubUnsetRequiredVariables is essentially a non-interactive,
|
||||
// non-error-producing variant of interactiveCollectVariables that creates
|
||||
// placeholders for values the user would be prompted for interactively on
|
||||
// other operations.
|
||||
//
|
||||
// This function should be used only in situations where variables values
|
||||
// will not be directly used and the variables map is being constructed only
|
||||
// to produce a complete Terraform context for some ancillary functionality
|
||||
// like "terraform console", "terraform state ...", etc.
|
||||
//
|
||||
// This function is guaranteed not to modify the given map, but it may return
|
||||
// the given map unchanged if no additions are required. If additions are
|
||||
// required then the result will be a new map containing everything in the
|
||||
// given map plus additional elements.
|
||||
func (b *Local) stubUnsetRequiredVariables(existing map[string]backend.UnparsedVariableValue, vcs map[string]*configs.Variable) map[string]backend.UnparsedVariableValue {
|
||||
var missing bool // Do we need to add anything?
|
||||
for name, vc := range vcs {
|
||||
if !vc.Required() {
|
||||
continue // We only stub required variables
|
||||
}
|
||||
if _, exists := existing[name]; !exists {
|
||||
missing = true
|
||||
}
|
||||
}
|
||||
if !missing {
|
||||
return existing
|
||||
}
|
||||
|
||||
// If we get down here then there's at least one variable value to add.
|
||||
ret := make(map[string]backend.UnparsedVariableValue, len(vcs))
|
||||
for k, v := range existing {
|
||||
ret[k] = v
|
||||
}
|
||||
for name, vc := range vcs {
|
||||
if !vc.Required() {
|
||||
continue
|
||||
}
|
||||
if _, exists := existing[name]; !exists {
|
||||
ret[name] = unparsedUnknownVariableValue{Name: name, WantType: vc.Type}
|
||||
}
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
type unparsedInteractiveVariableValue struct {
|
||||
Name, RawValue string
|
||||
}
|
||||
|
||||
var _ backend.UnparsedVariableValue = unparsedInteractiveVariableValue{}
|
||||
|
||||
func (v unparsedInteractiveVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) {
|
||||
var diags tfdiags.Diagnostics
|
||||
val, valDiags := mode.Parse(v.Name, v.RawValue)
|
||||
diags = diags.Append(valDiags)
|
||||
if diags.HasErrors() {
|
||||
return nil, diags
|
||||
}
|
||||
return &terraform.InputValue{
|
||||
Value: val,
|
||||
SourceType: terraform.ValueFromInput,
|
||||
}, diags
|
||||
}
|
||||
|
||||
type unparsedUnknownVariableValue struct {
|
||||
Name string
|
||||
WantType cty.Type
|
||||
}
|
||||
|
||||
var _ backend.UnparsedVariableValue = unparsedUnknownVariableValue{}
|
||||
|
||||
func (v unparsedUnknownVariableValue) ParseVariableValue(mode configs.VariableParsingMode) (*terraform.InputValue, tfdiags.Diagnostics) {
|
||||
return &terraform.InputValue{
|
||||
Value: cty.UnknownVal(v.WantType),
|
||||
SourceType: terraform.ValueFromInput,
|
||||
}, nil
|
||||
}
|
||||
|
|
|
@ -0,0 +1,57 @@
|
|||
package local
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/internal/initwd"
|
||||
)
|
||||
|
||||
func TestLocalContext(t *testing.T) {
|
||||
configDir := "./testdata/empty"
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir)
|
||||
defer configCleanup()
|
||||
|
||||
op := &backend.Operation{
|
||||
ConfigDir: configDir,
|
||||
ConfigLoader: configLoader,
|
||||
Workspace: backend.DefaultStateName,
|
||||
LockState: true,
|
||||
}
|
||||
|
||||
_, _, diags := b.Context(op)
|
||||
if diags.HasErrors() {
|
||||
t.Fatalf("unexpected error: %s", diags.Err().Error())
|
||||
}
|
||||
|
||||
// Context() retains a lock on success
|
||||
assertBackendStateLocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocalContext_error(t *testing.T) {
|
||||
configDir := "./testdata/apply"
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
_, configLoader, configCleanup := initwd.MustLoadConfigForTests(t, configDir)
|
||||
defer configCleanup()
|
||||
|
||||
op := &backend.Operation{
|
||||
ConfigDir: configDir,
|
||||
ConfigLoader: configLoader,
|
||||
Workspace: backend.DefaultStateName,
|
||||
LockState: true,
|
||||
}
|
||||
|
||||
_, _, diags := b.Context(op)
|
||||
if !diags.HasErrors() {
|
||||
t.Fatal("unexpected success")
|
||||
}
|
||||
|
||||
// Context() unlocks the state on failure
|
||||
assertBackendStateUnlocked(t, b)
|
||||
|
||||
}
|
|
@ -8,11 +8,15 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/mitchellh/cli"
|
||||
"github.com/mitchellh/colorstring"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/command/format"
|
||||
"github.com/hashicorp/terraform/plans"
|
||||
"github.com/hashicorp/terraform/plans/planfile"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
|
@ -69,30 +73,17 @@ func (b *Local) opPlan(
|
|||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
}
|
||||
|
||||
// Setup the state
|
||||
runningOp.State = tfCtx.State()
|
||||
|
||||
// If we're refreshing before plan, perform that
|
||||
baseState := runningOp.State
|
||||
if op.PlanRefresh {
|
||||
log.Printf("[INFO] backend/local: plan calling Refresh")
|
||||
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output(b.Colorize().Color(strings.TrimSpace(planRefreshing) + "\n"))
|
||||
}
|
||||
|
||||
refreshedState, err := tfCtx.Refresh()
|
||||
// the state was locked during succesfull context creation; unlock the state
|
||||
// when the operation completes
|
||||
defer func() {
|
||||
err := op.StateLocker.Unlock(nil)
|
||||
if err != nil {
|
||||
diags = diags.Append(err)
|
||||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
b.ShowDiagnostics(err)
|
||||
runningOp.Result = backend.OperationFailure
|
||||
}
|
||||
baseState = refreshedState // plan will be relative to our refreshed state
|
||||
if b.CLI != nil {
|
||||
b.CLI.Output("\n------------------------------------------------------------------------")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
runningOp.State = tfCtx.State()
|
||||
|
||||
// Perform the plan in a goroutine so we can be interrupted
|
||||
var plan *plans.Plan
|
||||
|
@ -118,7 +109,8 @@ func (b *Local) opPlan(
|
|||
b.ReportResult(runningOp, diags)
|
||||
return
|
||||
}
|
||||
// Record state
|
||||
|
||||
// Record whether this plan includes any side-effects that could be applied.
|
||||
runningOp.PlanEmpty = plan.Changes.Empty()
|
||||
|
||||
// Save the plan to disk
|
||||
|
@ -137,7 +129,7 @@ func (b *Local) opPlan(
|
|||
// We may have updated the state in the refresh step above, but we
|
||||
// will freeze that updated state in the plan file for now and
|
||||
// only write it if this plan is subsequently applied.
|
||||
plannedStateFile := statemgr.PlannedStateUpdate(opState, baseState)
|
||||
plannedStateFile := statemgr.PlannedStateUpdate(opState, plan.State)
|
||||
|
||||
log.Printf("[INFO] backend/local: writing plan output to: %s", path)
|
||||
err := planfile.Create(path, configSnap, plannedStateFile, plan)
|
||||
|
@ -156,12 +148,14 @@ func (b *Local) opPlan(
|
|||
if b.CLI != nil {
|
||||
schemas := tfCtx.Schemas()
|
||||
|
||||
if plan.Changes.Empty() {
|
||||
if runningOp.PlanEmpty {
|
||||
b.CLI.Output("\n" + b.Colorize().Color(strings.TrimSpace(planNoChanges)))
|
||||
// Even if there are no changes, there still could be some warnings
|
||||
b.ShowDiagnostics(diags)
|
||||
return
|
||||
}
|
||||
|
||||
b.renderPlan(plan, schemas)
|
||||
b.renderPlan(plan, plan.State, schemas)
|
||||
|
||||
// If we've accumulated any warnings along the way then we'll show them
|
||||
// here just before we show the summary and next steps. If we encountered
|
||||
|
@ -188,7 +182,31 @@ func (b *Local) opPlan(
|
|||
}
|
||||
}
|
||||
|
||||
func (b *Local) renderPlan(plan *plans.Plan, schemas *terraform.Schemas) {
|
||||
func (b *Local) renderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas) {
|
||||
RenderPlan(plan, baseState, schemas, b.CLI, b.Colorize())
|
||||
}
|
||||
|
||||
// RenderPlan renders the given plan to the given UI.
|
||||
//
|
||||
// This is exported only so that the "terraform show" command can re-use it.
|
||||
// Ideally it would be somewhere outside of this backend code so that both
|
||||
// can call into it, but we're leaving it here for now in order to avoid
|
||||
// disruptive refactoring.
|
||||
//
|
||||
// If you find yourself wanting to call this function from a third callsite,
|
||||
// please consider whether it's time to do the more disruptive refactoring
|
||||
// so that something other than the local backend package is offering this
|
||||
// functionality.
|
||||
//
|
||||
// The difference between baseState and priorState is that baseState is the
|
||||
// result of implicitly running refresh (unless that was disabled) while
|
||||
// priorState is a snapshot of the state as it was before we took any actions
|
||||
// at all. priorState can optionally be nil if the caller has only a saved
|
||||
// plan and not the prior state it was built from. In that case, changes to
|
||||
// output values will not currently be rendered because their prior values
|
||||
// are currently stored only in the prior state. (see the docstring for
|
||||
// func planHasSideEffects for why this is and when that might change)
|
||||
func RenderPlan(plan *plans.Plan, baseState *states.State, schemas *terraform.Schemas, ui cli.Ui, colorize *colorstring.Colorize) {
|
||||
counts := map[plans.Action]int{}
|
||||
var rChanges []*plans.ResourceInstanceChangeSrc
|
||||
for _, change := range plan.Changes.Resources {
|
||||
|
@ -222,9 +240,9 @@ func (b *Local) renderPlan(plan *plans.Plan, schemas *terraform.Schemas) {
|
|||
fmt.Fprintf(headerBuf, "%s read (data resources)\n", format.DiffActionSymbol(plans.Read))
|
||||
}
|
||||
|
||||
b.CLI.Output(b.Colorize().Color(headerBuf.String()))
|
||||
ui.Output(colorize.Color(headerBuf.String()))
|
||||
|
||||
b.CLI.Output("Terraform will perform the following actions:\n")
|
||||
ui.Output("Terraform will perform the following actions:\n")
|
||||
|
||||
// Note: we're modifying the backing slice of this plan object in-place
|
||||
// here. The ordering of resource changes in a plan is not significant,
|
||||
|
@ -243,22 +261,35 @@ func (b *Local) renderPlan(plan *plans.Plan, schemas *terraform.Schemas) {
|
|||
if rcs.Action == plans.NoOp {
|
||||
continue
|
||||
}
|
||||
providerSchema := schemas.ProviderSchema(rcs.ProviderAddr.ProviderConfig.Type)
|
||||
|
||||
providerSchema := schemas.ProviderSchema(rcs.ProviderAddr.Provider)
|
||||
if providerSchema == nil {
|
||||
// Should never happen
|
||||
b.CLI.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.ProviderAddr))
|
||||
ui.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.ProviderAddr))
|
||||
continue
|
||||
}
|
||||
rSchema, _ := providerSchema.SchemaForResourceAddr(rcs.Addr.Resource.Resource)
|
||||
if rSchema == nil {
|
||||
// Should never happen
|
||||
b.CLI.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.Addr))
|
||||
ui.Output(fmt.Sprintf("(schema missing for %s)\n", rcs.Addr))
|
||||
continue
|
||||
}
|
||||
b.CLI.Output(format.ResourceChange(
|
||||
|
||||
// check if the change is due to a tainted resource
|
||||
tainted := false
|
||||
if !baseState.Empty() {
|
||||
if is := baseState.ResourceInstance(rcs.Addr); is != nil {
|
||||
if obj := is.GetGeneration(rcs.DeposedKey.Generation()); obj != nil {
|
||||
tainted = obj.Status == states.ObjectTainted
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ui.Output(format.ResourceChange(
|
||||
rcs,
|
||||
tainted,
|
||||
rSchema,
|
||||
b.CLIColor,
|
||||
colorize,
|
||||
))
|
||||
}
|
||||
|
||||
|
@ -275,23 +306,19 @@ func (b *Local) renderPlan(plan *plans.Plan, schemas *terraform.Schemas) {
|
|||
stats[change.Action]++
|
||||
}
|
||||
}
|
||||
b.CLI.Output(b.Colorize().Color(fmt.Sprintf(
|
||||
ui.Output(colorize.Color(fmt.Sprintf(
|
||||
"[reset][bold]Plan:[reset] "+
|
||||
"%d to add, %d to change, %d to destroy.",
|
||||
stats[plans.Create], stats[plans.Update], stats[plans.Delete],
|
||||
)))
|
||||
|
||||
// If there is at least one planned change to the root module outputs
|
||||
// then we'll render a summary of those too.
|
||||
if len(plan.Changes.Outputs) > 0 {
|
||||
ui.Output(colorize.Color("[reset]\n[bold]Changes to Outputs:[reset]" + format.OutputChanges(plan.Changes.Outputs, colorize)))
|
||||
}
|
||||
}
|
||||
|
||||
const planErrNoConfig = `
|
||||
No configuration files found!
|
||||
|
||||
Plan requires configuration to be present. Planning without a configuration
|
||||
would mark everything for destruction, which is normally not what is desired.
|
||||
If you would like to destroy everything, please run plan with the "-destroy"
|
||||
flag or create a single empty configuration file. Otherwise, please create
|
||||
a Terraform configuration file in the path being executed and try again.
|
||||
`
|
||||
|
||||
const planHeaderIntro = `
|
||||
An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
|
|
|
@ -25,7 +25,7 @@ func TestLocal_planBasic(t *testing.T) {
|
|||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
|
@ -41,6 +41,9 @@ func TestLocal_planBasic(t *testing.T) {
|
|||
if !p.PlanResourceChangeCalled {
|
||||
t.Fatal("PlanResourceChange should be called")
|
||||
}
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocal_planInAutomation(t *testing.T) {
|
||||
|
@ -59,7 +62,7 @@ func TestLocal_planInAutomation(t *testing.T) {
|
|||
b.RunningInAutomation = false
|
||||
b.CLI = cli.NewMockUi()
|
||||
{
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
|
@ -83,7 +86,7 @@ func TestLocal_planInAutomation(t *testing.T) {
|
|||
b.RunningInAutomation = true
|
||||
b.CLI = cli.NewMockUi()
|
||||
{
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
|
@ -111,7 +114,7 @@ func TestLocal_planNoConfig(t *testing.T) {
|
|||
|
||||
b.CLI = cli.NewMockUi()
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/empty")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/empty")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
|
@ -128,6 +131,114 @@ func TestLocal_planNoConfig(t *testing.T) {
|
|||
if !strings.Contains(output, "configuration") {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
// This test validates the state lacking behavior when the inner call to
|
||||
// Context() fails
|
||||
func TestLocal_plan_context_error(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
// we coerce a failure in Context() by omitting the provider schema
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result != backend.OperationFailure {
|
||||
t.Fatalf("plan operation succeeded")
|
||||
}
|
||||
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocal_planOutputsChanged(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance,
|
||||
OutputValue: addrs.OutputValue{Name: "changed"},
|
||||
}, cty.StringVal("before"), false)
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance,
|
||||
OutputValue: addrs.OutputValue{Name: "sensitive_before"},
|
||||
}, cty.StringVal("before"), true)
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance,
|
||||
OutputValue: addrs.OutputValue{Name: "sensitive_after"},
|
||||
}, cty.StringVal("before"), false)
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance,
|
||||
OutputValue: addrs.OutputValue{Name: "removed"}, // not present in the config fixture
|
||||
}, cty.StringVal("before"), false)
|
||||
ss.SetOutputValue(addrs.AbsOutputValue{
|
||||
Module: addrs.RootModuleInstance,
|
||||
OutputValue: addrs.OutputValue{Name: "unchanged"},
|
||||
}, cty.StringVal("before"), false)
|
||||
// NOTE: This isn't currently testing the situation where the new
|
||||
// value of an output is unknown, because to do that requires there to
|
||||
// be at least one managed resource Create action in the plan and that
|
||||
// would defeat the point of this test, which is to ensure that a
|
||||
// plan containing only output changes is considered "non-empty".
|
||||
// For now we're not too worried about testing the "new value is
|
||||
// unknown" situation because that's already common for printing out
|
||||
// resource changes and we already have many tests for that.
|
||||
}))
|
||||
b.CLI = cli.NewMockUi()
|
||||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan-outputs-changed")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
op.PlanOutPath = planPath
|
||||
cfg := cty.ObjectVal(map[string]cty.Value{
|
||||
"path": cty.StringVal(b.StatePath),
|
||||
})
|
||||
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op.PlanOutBackend = &plans.Backend{
|
||||
// Just a placeholder so that we can generate a valid plan file.
|
||||
Type: "local",
|
||||
Config: cfgRaw,
|
||||
}
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result != backend.OperationSuccess {
|
||||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
if run.PlanEmpty {
|
||||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
||||
expectedOutput := strings.TrimSpace(`
|
||||
Plan: 0 to add, 0 to change, 0 to destroy.
|
||||
|
||||
Changes to Outputs:
|
||||
+ added = "after"
|
||||
~ changed = "before" -> "after"
|
||||
- removed = "before" -> null
|
||||
~ sensitive_after = (sensitive value)
|
||||
~ sensitive_before = (sensitive value)
|
||||
`)
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
if !strings.Contains(output, expectedOutput) {
|
||||
t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocal_planTainted(t *testing.T) {
|
||||
|
@ -139,7 +250,7 @@ func TestLocal_planTainted(t *testing.T) {
|
|||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
op.PlanOutPath = planPath
|
||||
|
@ -176,14 +287,11 @@ Resource actions are indicated with the following symbols:
|
|||
|
||||
Terraform will perform the following actions:
|
||||
|
||||
# test_instance.foo must be replaced
|
||||
# test_instance.foo is tainted, so must be replaced
|
||||
-/+ resource "test_instance" "foo" {
|
||||
ami = "bar"
|
||||
# (1 unchanged attribute hidden)
|
||||
|
||||
network_interface {
|
||||
description = "Main network interface"
|
||||
device_index = 0
|
||||
}
|
||||
# (1 unchanged block hidden)
|
||||
}
|
||||
|
||||
Plan: 1 to add, 0 to change, 1 to destroy.`
|
||||
|
@ -193,6 +301,122 @@ Plan: 1 to add, 0 to change, 1 to destroy.`
|
|||
}
|
||||
}
|
||||
|
||||
func TestLocal_planDeposedOnly(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
testStateFile(t, b.StatePath, states.BuildState(func(ss *states.SyncState) {
|
||||
ss.SetResourceInstanceDeposed(
|
||||
addrs.Resource{
|
||||
Mode: addrs.ManagedResourceMode,
|
||||
Type: "test_instance",
|
||||
Name: "foo",
|
||||
}.Instance(addrs.NoKey).Absolute(addrs.RootModuleInstance),
|
||||
states.DeposedKey("00000000"),
|
||||
&states.ResourceInstanceObjectSrc{
|
||||
Status: states.ObjectReady,
|
||||
AttrsJSON: []byte(`{
|
||||
"ami": "bar",
|
||||
"network_interface": [{
|
||||
"device_index": 0,
|
||||
"description": "Main network interface"
|
||||
}]
|
||||
}`),
|
||||
},
|
||||
addrs.AbsProviderConfig{
|
||||
Provider: addrs.NewDefaultProvider("test"),
|
||||
Module: addrs.RootModule,
|
||||
},
|
||||
)
|
||||
}))
|
||||
b.CLI = cli.NewMockUi()
|
||||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
op.PlanOutPath = planPath
|
||||
cfg := cty.ObjectVal(map[string]cty.Value{
|
||||
"path": cty.StringVal(b.StatePath),
|
||||
})
|
||||
cfgRaw, err := plans.NewDynamicValue(cfg, cfg.Type())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
op.PlanOutBackend = &plans.Backend{
|
||||
// Just a placeholder so that we can generate a valid plan file.
|
||||
Type: "local",
|
||||
Config: cfgRaw,
|
||||
}
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result != backend.OperationSuccess {
|
||||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
if p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should not be called")
|
||||
}
|
||||
if run.PlanEmpty {
|
||||
t.Fatal("plan should not be empty")
|
||||
}
|
||||
|
||||
// The deposed object and the current object are distinct, so our
|
||||
// plan includes separate actions for each of them. This strange situation
|
||||
// is not common: it should arise only if Terraform fails during
|
||||
// a create-before-destroy when the create hasn't completed yet but
|
||||
// in a severe way that prevents the previous object from being restored
|
||||
// as "current".
|
||||
//
|
||||
// However, that situation was more common in some earlier Terraform
|
||||
// versions where deposed objects were not managed properly, so this
|
||||
// can arise when upgrading from an older version with deposed objects
|
||||
// already in the state.
|
||||
//
|
||||
// This is one of the few cases where we expose the idea of "deposed" in
|
||||
// the UI, including the user-unfriendly "deposed key" (00000000 in this
|
||||
// case) just so that users can correlate this with what they might
|
||||
// see in `terraform show` and in the subsequent apply output, because
|
||||
// it's also possible for there to be _multiple_ deposed objects, in the
|
||||
// unlikely event that create_before_destroy _keeps_ crashing across
|
||||
// subsequent runs.
|
||||
expectedOutput := `An execution plan has been generated and is shown below.
|
||||
Resource actions are indicated with the following symbols:
|
||||
+ create
|
||||
- destroy
|
||||
|
||||
Terraform will perform the following actions:
|
||||
|
||||
# test_instance.foo will be created
|
||||
+ resource "test_instance" "foo" {
|
||||
+ ami = "bar"
|
||||
|
||||
+ network_interface {
|
||||
+ description = "Main network interface"
|
||||
+ device_index = 0
|
||||
}
|
||||
}
|
||||
|
||||
# test_instance.foo (deposed object 00000000) will be destroyed
|
||||
- resource "test_instance" "foo" {
|
||||
- ami = "bar" -> null
|
||||
|
||||
- network_interface {
|
||||
- description = "Main network interface" -> null
|
||||
- device_index = 0 -> null
|
||||
}
|
||||
}
|
||||
|
||||
Plan: 1 to add, 0 to change, 1 to destroy.`
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
if !strings.Contains(output, expectedOutput) {
|
||||
t.Fatalf("Unexpected output:\n%s\n\nwant output containing:\n%s", output, expectedOutput)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
|
@ -202,7 +426,7 @@ func TestLocal_planTainted_createBeforeDestroy(t *testing.T) {
|
|||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-cbd")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan-cbd")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
op.PlanOutPath = planPath
|
||||
|
@ -239,14 +463,11 @@ Resource actions are indicated with the following symbols:
|
|||
|
||||
Terraform will perform the following actions:
|
||||
|
||||
# test_instance.foo must be replaced
|
||||
# test_instance.foo is tainted, so must be replaced
|
||||
+/- resource "test_instance" "foo" {
|
||||
ami = "bar"
|
||||
# (1 unchanged attribute hidden)
|
||||
|
||||
network_interface {
|
||||
description = "Main network interface"
|
||||
device_index = 0
|
||||
}
|
||||
# (1 unchanged block hidden)
|
||||
}
|
||||
|
||||
Plan: 1 to add, 0 to change, 1 to destroy.`
|
||||
|
@ -263,7 +484,7 @@ func TestLocal_planRefreshFalse(t *testing.T) {
|
|||
p := TestLocalProvider(t, b, "test", planFixtureSchema())
|
||||
testStateFile(t, b.StatePath, testPlanState())
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -295,7 +516,7 @@ func TestLocal_planDestroy(t *testing.T) {
|
|||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.Destroy = true
|
||||
op.PlanRefresh = true
|
||||
|
@ -322,8 +543,8 @@ func TestLocal_planDestroy(t *testing.T) {
|
|||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
|
||||
if !p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should be called")
|
||||
if p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should not be called")
|
||||
}
|
||||
|
||||
if run.PlanEmpty {
|
||||
|
@ -351,7 +572,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
|
|||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/destroy-with-ds")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/destroy-with-ds")
|
||||
defer configCleanup()
|
||||
op.Destroy = true
|
||||
op.PlanRefresh = true
|
||||
|
@ -378,12 +599,12 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
|
|||
t.Fatalf("plan operation failed")
|
||||
}
|
||||
|
||||
if !p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should be called")
|
||||
if p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should not be called")
|
||||
}
|
||||
|
||||
if !p.ReadDataSourceCalled {
|
||||
t.Fatal("ReadDataSourceCalled should be called")
|
||||
if p.ReadDataSourceCalled {
|
||||
t.Fatal("ReadDataSourceCalled should not be called")
|
||||
}
|
||||
|
||||
if run.PlanEmpty {
|
||||
|
@ -400,7 +621,7 @@ func TestLocal_planDestroy_withDataSources(t *testing.T) {
|
|||
// Data source should not be rendered in the output
|
||||
expectedOutput := `Terraform will perform the following actions:
|
||||
|
||||
# test_instance.foo will be destroyed
|
||||
# test_instance.foo[0] will be destroyed
|
||||
- resource "test_instance" "foo" {
|
||||
- ami = "bar" -> null
|
||||
|
||||
|
@ -414,7 +635,7 @@ Plan: 0 to add, 0 to change, 1 to destroy.`
|
|||
|
||||
output := b.CLI.(*cli.MockUi).OutputWriter.String()
|
||||
if !strings.Contains(output, expectedOutput) {
|
||||
t.Fatalf("Unexpected output (expected no data source):\n%s", output)
|
||||
t.Fatalf("Unexpected output:\n%s", output)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -436,7 +657,7 @@ func TestLocal_planOutPathNoChange(t *testing.T) {
|
|||
defer os.RemoveAll(outDir)
|
||||
planPath := filepath.Join(outDir, "plan.tfplan")
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan")
|
||||
defer configCleanup()
|
||||
op.PlanOutPath = planPath
|
||||
cfg := cty.ObjectVal(map[string]cty.Value{
|
||||
|
@ -451,6 +672,7 @@ func TestLocal_planOutPathNoChange(t *testing.T) {
|
|||
Type: "local",
|
||||
Config: cfgRaw,
|
||||
}
|
||||
op.PlanRefresh = true
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
|
@ -486,7 +708,7 @@ func TestLocal_planScaleOutNoDupeCount(t *testing.T) {
|
|||
outDir := testTempDir(t)
|
||||
defer os.RemoveAll(outDir)
|
||||
|
||||
op, configCleanup := testOperationPlan(t, "./test-fixtures/plan-scaleout")
|
||||
op, configCleanup := testOperationPlan(t, "./testdata/plan-scaleout")
|
||||
defer configCleanup()
|
||||
op.PlanRefresh = true
|
||||
|
||||
|
@ -543,9 +765,10 @@ func testPlanState() *states.State {
|
|||
}]
|
||||
}`),
|
||||
},
|
||||
addrs.ProviderConfig{
|
||||
Type: "test",
|
||||
}.Absolute(addrs.RootModuleInstance),
|
||||
addrs.AbsProviderConfig{
|
||||
Provider: addrs.NewDefaultProvider("test"),
|
||||
Module: addrs.RootModule,
|
||||
},
|
||||
)
|
||||
return state
|
||||
}
|
||||
|
@ -569,9 +792,10 @@ func testPlanState_withDataSource() *states.State {
|
|||
}]
|
||||
}`),
|
||||
},
|
||||
addrs.ProviderConfig{
|
||||
Type: "test",
|
||||
}.Absolute(addrs.RootModuleInstance),
|
||||
addrs.AbsProviderConfig{
|
||||
Provider: addrs.NewDefaultProvider("test"),
|
||||
Module: addrs.RootModule,
|
||||
},
|
||||
)
|
||||
rootModule.SetResourceInstanceCurrent(
|
||||
addrs.Resource{
|
||||
|
@ -585,9 +809,10 @@ func testPlanState_withDataSource() *states.State {
|
|||
"filter": "foo"
|
||||
}`),
|
||||
},
|
||||
addrs.ProviderConfig{
|
||||
Type: "test",
|
||||
}.Absolute(addrs.RootModuleInstance),
|
||||
addrs.AbsProviderConfig{
|
||||
Provider: addrs.NewDefaultProvider("test"),
|
||||
Module: addrs.RootModule,
|
||||
},
|
||||
)
|
||||
return state
|
||||
}
|
||||
|
@ -600,7 +825,7 @@ func testPlanState_tainted() *states.State {
|
|||
Mode: addrs.ManagedResourceMode,
|
||||
Type: "test_instance",
|
||||
Name: "foo",
|
||||
}.Instance(addrs.IntKey(0)),
|
||||
}.Instance(addrs.NoKey),
|
||||
&states.ResourceInstanceObjectSrc{
|
||||
Status: states.ObjectTainted,
|
||||
AttrsJSON: []byte(`{
|
||||
|
@ -611,9 +836,10 @@ func testPlanState_tainted() *states.State {
|
|||
}]
|
||||
}`),
|
||||
},
|
||||
addrs.ProviderConfig{
|
||||
Type: "test",
|
||||
}.Absolute(addrs.RootModuleInstance),
|
||||
addrs.AbsProviderConfig{
|
||||
Provider: addrs.NewDefaultProvider("test"),
|
||||
Module: addrs.RootModule,
|
||||
},
|
||||
)
|
||||
return state
|
||||
}
|
||||
|
@ -636,7 +862,7 @@ func testReadPlan(t *testing.T, path string) *plans.Plan {
|
|||
}
|
||||
|
||||
// planFixtureSchema returns a schema suitable for processing the
|
||||
// configuration in test-fixtures/plan . This schema should be
|
||||
// configuration in testdata/plan . This schema should be
|
||||
// assigned to a mock provider named "test".
|
||||
func planFixtureSchema() *terraform.ProviderSchema {
|
||||
return &terraform.ProviderSchema{
|
||||
|
|
|
@ -42,6 +42,9 @@ func (b *Local) opRefresh(
|
|||
}
|
||||
}
|
||||
|
||||
// Refresh now happens via a plan, so we need to ensure this is enabled
|
||||
op.PlanRefresh = true
|
||||
|
||||
// Get our context
|
||||
tfCtx, _, opState, contextDiags := b.context(op)
|
||||
diags = diags.Append(contextDiags)
|
||||
|
@ -50,6 +53,16 @@ func (b *Local) opRefresh(
|
|||
return
|
||||
}
|
||||
|
||||
// the state was locked during succesfull context creation; unlock the state
|
||||
// when the operation completes
|
||||
defer func() {
|
||||
err := op.StateLocker.Unlock(nil)
|
||||
if err != nil {
|
||||
b.ShowDiagnostics(err)
|
||||
runningOp.Result = backend.OperationFailure
|
||||
}
|
||||
}()
|
||||
|
||||
// Set our state
|
||||
runningOp.State = opState.State()
|
||||
if !runningOp.State.HasResources() {
|
||||
|
|
|
@ -5,12 +5,14 @@ import (
|
|||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/internal/initwd"
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
|
@ -19,14 +21,14 @@ func TestLocal_refresh(t *testing.T) {
|
|||
defer cleanup()
|
||||
|
||||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
terraform.TestStateFile(t, b.StatePath, testRefreshState())
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -42,80 +44,18 @@ func TestLocal_refresh(t *testing.T) {
|
|||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
`)
|
||||
}
|
||||
|
||||
func TestLocal_refreshNoConfig(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
terraform.TestStateFile(t, b.StatePath, testRefreshState())
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/empty")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
|
||||
if !p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should be called")
|
||||
}
|
||||
|
||||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
`)
|
||||
}
|
||||
|
||||
// GH-12174
|
||||
func TestLocal_refreshNilModuleWithInput(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
terraform.TestStateFile(t, b.StatePath, testRefreshState())
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
|
||||
b.OpInput = true
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/empty")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
|
||||
if !p.ReadResourceCalled {
|
||||
t.Fatal("ReadResource should be called")
|
||||
}
|
||||
|
||||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
`)
|
||||
// the backend should be unlocked after a run
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func TestLocal_refreshInput(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
terraform.TestStateFile(t, b.StatePath, testRefreshState())
|
||||
|
||||
p.GetSchemaReturn = &terraform.ProviderSchema{
|
||||
schema := &terraform.ProviderSchema{
|
||||
Provider: &configschema.Block{
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"value": {Type: cty.String, Optional: true},
|
||||
|
@ -124,29 +64,35 @@ func TestLocal_refreshInput(t *testing.T) {
|
|||
ResourceTypes: map[string]*configschema.Block{
|
||||
"test_instance": {
|
||||
Attributes: map[string]*configschema.Attribute{
|
||||
"id": {Type: cty.String, Computed: true},
|
||||
"foo": {Type: cty.String, Optional: true},
|
||||
"id": {Type: cty.String, Optional: true},
|
||||
"ami": {Type: cty.String, Optional: true},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
p := TestLocalProvider(t, b, "test", schema)
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
})}
|
||||
p.ConfigureFn = func(c *terraform.ResourceConfig) error {
|
||||
if v, ok := c.Get("value"); !ok || v != "bar" {
|
||||
return fmt.Errorf("no value set")
|
||||
p.ConfigureFn = func(req providers.ConfigureRequest) (resp providers.ConfigureResponse) {
|
||||
val := req.Config.GetAttr("value")
|
||||
if val.IsNull() || val.AsString() != "bar" {
|
||||
resp.Diagnostics = resp.Diagnostics.Append(fmt.Errorf("incorrect value %#v", val))
|
||||
}
|
||||
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
// Enable input asking since it is normally disabled by default
|
||||
b.OpInput = true
|
||||
b.ContextOpts.UIInput = &terraform.MockUIInput{InputReturnString: "bar"}
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh-var-unset")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh-var-unset")
|
||||
defer configCleanup()
|
||||
op.UIIn = b.ContextOpts.UIInput
|
||||
|
||||
|
@ -163,7 +109,7 @@ func TestLocal_refreshInput(t *testing.T) {
|
|||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
`)
|
||||
}
|
||||
|
||||
|
@ -171,7 +117,7 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
p := TestLocalProvider(t, b, "test", refreshFixtureSchema())
|
||||
terraform.TestStateFile(t, b.StatePath, testRefreshState())
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
p.ReadResourceFn = nil
|
||||
p.ReadResourceResponse = providers.ReadResourceResponse{NewState: cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("yes"),
|
||||
|
@ -180,7 +126,7 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
// Enable validation
|
||||
b.OpValidation = true
|
||||
|
||||
op, configCleanup := testOperationRefresh(t, "./test-fixtures/refresh")
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/refresh")
|
||||
defer configCleanup()
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
|
@ -196,10 +142,32 @@ func TestLocal_refreshValidate(t *testing.T) {
|
|||
checkState(t, b.StateOutPath, `
|
||||
test_instance.foo:
|
||||
ID = yes
|
||||
provider = provider.test
|
||||
provider = provider["registry.terraform.io/hashicorp/test"]
|
||||
`)
|
||||
}
|
||||
|
||||
// This test validates the state lacking behavior when the inner call to
|
||||
// Context() fails
|
||||
func TestLocal_refresh_context_error(t *testing.T) {
|
||||
b, cleanup := TestLocal(t)
|
||||
defer cleanup()
|
||||
testStateFile(t, b.StatePath, testRefreshState())
|
||||
op, configCleanup := testOperationRefresh(t, "./testdata/apply")
|
||||
defer configCleanup()
|
||||
|
||||
// we coerce a failure in Context() by omitting the provider schema
|
||||
|
||||
run, err := b.Operation(context.Background(), op)
|
||||
if err != nil {
|
||||
t.Fatalf("bad: %s", err)
|
||||
}
|
||||
<-run.Done()
|
||||
if run.Result == backend.OperationSuccess {
|
||||
t.Fatal("operation succeeded; want failure")
|
||||
}
|
||||
assertBackendStateUnlocked(t, b)
|
||||
}
|
||||
|
||||
func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, func()) {
|
||||
t.Helper()
|
||||
|
||||
|
@ -209,32 +177,27 @@ func testOperationRefresh(t *testing.T, configDir string) (*backend.Operation, f
|
|||
Type: backend.OperationTypeRefresh,
|
||||
ConfigDir: configDir,
|
||||
ConfigLoader: configLoader,
|
||||
LockState: true,
|
||||
}, configCleanup
|
||||
}
|
||||
|
||||
// testRefreshState is just a common state that we use for testing refresh.
|
||||
func testRefreshState() *terraform.State {
|
||||
return &terraform.State{
|
||||
Version: 2,
|
||||
Modules: []*terraform.ModuleState{
|
||||
&terraform.ModuleState{
|
||||
Path: []string{"root"},
|
||||
Resources: map[string]*terraform.ResourceState{
|
||||
"test_instance.foo": &terraform.ResourceState{
|
||||
Type: "test_instance",
|
||||
Primary: &terraform.InstanceState{
|
||||
ID: "bar",
|
||||
},
|
||||
},
|
||||
},
|
||||
Outputs: map[string]*terraform.OutputState{},
|
||||
},
|
||||
func testRefreshState() *states.State {
|
||||
state := states.NewState()
|
||||
root := state.EnsureModule(addrs.RootModuleInstance)
|
||||
root.SetResourceInstanceCurrent(
|
||||
mustResourceInstanceAddr("test_instance.foo").Resource,
|
||||
&states.ResourceInstanceObjectSrc{
|
||||
Status: states.ObjectReady,
|
||||
AttrsJSON: []byte(`{"id":"bar"}`),
|
||||
},
|
||||
}
|
||||
mustProviderConfig(`provider["registry.terraform.io/hashicorp/test"]`),
|
||||
)
|
||||
return state
|
||||
}
|
||||
|
||||
// refreshFixtureSchema returns a schema suitable for processing the
|
||||
// configuration in test-fixtures/refresh . This schema should be
|
||||
// configuration in testdata/refresh . This schema should be
|
||||
// assigned to a mock provider named "test".
|
||||
func refreshFixtureSchema() *terraform.ProviderSchema {
|
||||
return &terraform.ProviderSchema{
|
||||
|
|
|
@ -4,6 +4,15 @@ package local
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[countHookActionAdd-0]
|
||||
_ = x[countHookActionChange-1]
|
||||
_ = x[countHookActionRemove-2]
|
||||
}
|
||||
|
||||
const _countHookAction_name = "countHookActionAddcountHookActionChangecountHookActionRemove"
|
||||
|
||||
var _countHookAction_index = [...]uint8{0, 18, 39, 60}
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
package local
|
||||
|
||||
//go:generate stringer -type=countHookAction hook_count_action.go
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=countHookAction hook_count_action.go
|
||||
|
||||
type countHookAction byte
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ import (
|
|||
)
|
||||
|
||||
// StateHook is a hook that continuously updates the state by calling
|
||||
// WriteState on a state.State.
|
||||
// WriteState on a statemgr.Full.
|
||||
type StateHook struct {
|
||||
terraform.NilHook
|
||||
sync.Mutex
|
||||
|
|
|
@ -3,7 +3,6 @@ package local
|
|||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
)
|
||||
|
@ -16,7 +15,7 @@ func TestStateHook(t *testing.T) {
|
|||
is := statemgr.NewTransientInMemory(nil)
|
||||
var hook terraform.Hook = &StateHook{StateMgr: is}
|
||||
|
||||
s := state.TestStateInitial()
|
||||
s := statemgr.TestFullInitialState()
|
||||
action, err := hook.PostStateUpdate(s)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
|
|
|
@ -1,9 +0,0 @@
|
|||
variable "should_ask" {}
|
||||
|
||||
provider "test" {
|
||||
value = "${var.should_ask}"
|
||||
}
|
||||
|
||||
resource "test_instance" "foo" {
|
||||
foo = "bar"
|
||||
}
|
|
@ -0,0 +1,24 @@
|
|||
output "changed" {
|
||||
value = "after"
|
||||
}
|
||||
|
||||
output "sensitive_before" {
|
||||
value = "after"
|
||||
# no sensitive = true here, but the prior state is marked as sensitive in the test code
|
||||
}
|
||||
|
||||
output "sensitive_after" {
|
||||
value = "after"
|
||||
|
||||
# This one is _not_ sensitive in the prior state, but is transitioning to
|
||||
# being sensitive in our new plan.
|
||||
sensitive = true
|
||||
}
|
||||
|
||||
output "added" { // not present in the prior state
|
||||
value = "after"
|
||||
}
|
||||
|
||||
output "unchanged" {
|
||||
value = "before"
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
variable "should_ask" {}
|
||||
|
||||
provider "test" {
|
||||
value = var.should_ask
|
||||
}
|
||||
|
||||
resource "test_instance" "foo" {
|
||||
foo = "bar"
|
||||
}
|
|
@ -6,7 +6,11 @@ import (
|
|||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
"github.com/hashicorp/terraform/addrs"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/providers"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
|
@ -38,9 +42,14 @@ func TestLocal(t *testing.T) (*Local, func()) {
|
|||
// function, t.Helper doesn't apply and so the log source
|
||||
// isn't correctly shown in the test log output. This seems
|
||||
// unavoidable as long as this is happening so indirectly.
|
||||
t.Log(diag.Description().Summary)
|
||||
desc := diag.Description()
|
||||
if desc.Detail != "" {
|
||||
t.Logf("%s: %s", desc.Summary, desc.Detail)
|
||||
} else {
|
||||
t.Log(desc.Summary)
|
||||
}
|
||||
if local.CLI != nil {
|
||||
local.CLI.Error(diag.Description().Summary)
|
||||
local.CLI.Error(desc.Summary)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -59,10 +68,34 @@ func TestLocal(t *testing.T) (*Local, func()) {
|
|||
func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.ProviderSchema) *terraform.MockProvider {
|
||||
// Build a mock resource provider for in-memory operations
|
||||
p := new(terraform.MockProvider)
|
||||
|
||||
if schema == nil {
|
||||
schema = &terraform.ProviderSchema{} // default schema is empty
|
||||
}
|
||||
p.GetSchemaReturn = schema
|
||||
|
||||
p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) providers.PlanResourceChangeResponse {
|
||||
rSchema, _ := schema.SchemaForResourceType(addrs.ManagedResourceMode, req.TypeName)
|
||||
if rSchema == nil {
|
||||
rSchema = &configschema.Block{} // default schema is empty
|
||||
}
|
||||
plannedVals := map[string]cty.Value{}
|
||||
for name, attrS := range rSchema.Attributes {
|
||||
val := req.ProposedNewState.GetAttr(name)
|
||||
if attrS.Computed && val.IsNull() {
|
||||
val = cty.UnknownVal(attrS.Type)
|
||||
}
|
||||
plannedVals[name] = val
|
||||
}
|
||||
for name := range rSchema.BlockTypes {
|
||||
// For simplicity's sake we just copy the block attributes over
|
||||
// verbatim, since this package's mock providers are all relatively
|
||||
// simple -- we're testing the backend, not esoteric provider features.
|
||||
plannedVals[name] = req.ProposedNewState.GetAttr(name)
|
||||
}
|
||||
|
||||
return providers.PlanResourceChangeResponse{
|
||||
PlannedState: req.ProposedNewState,
|
||||
PlannedState: cty.ObjectVal(plannedVals),
|
||||
PlannedPrivate: req.PriorPrivate,
|
||||
}
|
||||
}
|
||||
|
@ -79,11 +112,9 @@ func TestLocalProvider(t *testing.T, b *Local, name string, schema *terraform.Pr
|
|||
}
|
||||
|
||||
// Setup our provider
|
||||
b.ContextOpts.ProviderResolver = providers.ResolverFixed(
|
||||
map[string]providers.Factory{
|
||||
name: providers.FactoryFixed(p),
|
||||
},
|
||||
)
|
||||
b.ContextOpts.Providers = map[addrs.Provider]providers.Factory{
|
||||
addrs.NewDefaultProvider(name): providers.FactoryFixed(p),
|
||||
}
|
||||
|
||||
return p
|
||||
|
||||
|
@ -178,3 +209,45 @@ func testStateFile(t *testing.T, path string, s *states.State) {
|
|||
stateFile := statemgr.NewFilesystem(path)
|
||||
stateFile.WriteState(s)
|
||||
}
|
||||
|
||||
func mustProviderConfig(s string) addrs.AbsProviderConfig {
|
||||
p, diags := addrs.ParseAbsProviderConfigStr(s)
|
||||
if diags.HasErrors() {
|
||||
panic(diags.Err())
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func mustResourceInstanceAddr(s string) addrs.AbsResourceInstance {
|
||||
addr, diags := addrs.ParseAbsResourceInstanceStr(s)
|
||||
if diags.HasErrors() {
|
||||
panic(diags.Err())
|
||||
}
|
||||
return addr
|
||||
}
|
||||
|
||||
// assertBackendStateUnlocked attempts to lock the backend state. Failure
|
||||
// indicates that the state was indeed locked and therefore this function will
|
||||
// return true.
|
||||
func assertBackendStateUnlocked(t *testing.T, b *Local) bool {
|
||||
t.Helper()
|
||||
stateMgr, _ := b.StateMgr(backend.DefaultStateName)
|
||||
if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
|
||||
t.Errorf("state is already locked: %s", err.Error())
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// assertBackendStateLocked attempts to lock the backend state. Failure
|
||||
// indicates that the state was already locked and therefore this function will
|
||||
// return false.
|
||||
func assertBackendStateLocked(t *testing.T, b *Local) bool {
|
||||
t.Helper()
|
||||
stateMgr, _ := b.StateMgr(backend.DefaultStateName)
|
||||
if _, err := stateMgr.Lock(statemgr.NewLockInfo()); err != nil {
|
||||
return true
|
||||
}
|
||||
t.Error("unexpected success locking state")
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -1,40 +0,0 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"github.com/hashicorp/terraform/configs/configschema"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
// Nil is a no-op implementation of Backend.
|
||||
//
|
||||
// This is useful to embed within another struct to implement all of the
|
||||
// backend interface for testing.
|
||||
type Nil struct{}
|
||||
|
||||
func (Nil) ConfigSchema() *configschema.Block {
|
||||
return &configschema.Block{}
|
||||
}
|
||||
|
||||
func (Nil) ValidateConfig(cty.Value) tfdiags.Diagnostics {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Nil) Configure(cty.Value) tfdiags.Diagnostics {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Nil) StateMgr(string) (statemgr.Full, error) {
|
||||
// We must return a non-nil manager to adhere to the interface, so
|
||||
// we'll return an in-memory-only one.
|
||||
return statemgr.NewFullFake(statemgr.NewTransientInMemory(nil), nil), nil
|
||||
}
|
||||
|
||||
func (Nil) DeleteWorkspace(string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (Nil) Workspaces() ([]string, error) {
|
||||
return []string{DefaultStateName}, nil
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
package backend
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNil_impl(t *testing.T) {
|
||||
var _ Backend = new(Nil)
|
||||
}
|
|
@ -1,6 +1,6 @@
|
|||
package backend
|
||||
|
||||
//go:generate stringer -type=OperationType operation_type.go
|
||||
//go:generate go run golang.org/x/tools/cmd/stringer -type=OperationType operation_type.go
|
||||
|
||||
// OperationType is an enum used with Operation to specify the operation
|
||||
// type to perform for Terraform.
|
||||
|
|
|
@ -4,6 +4,16 @@ package backend
|
|||
|
||||
import "strconv"
|
||||
|
||||
func _() {
|
||||
// An "invalid array index" compiler error signifies that the constant values have changed.
|
||||
// Re-run the stringer command to generate them again.
|
||||
var x [1]struct{}
|
||||
_ = x[OperationTypeInvalid-0]
|
||||
_ = x[OperationTypeRefresh-1]
|
||||
_ = x[OperationTypePlan-2]
|
||||
_ = x[OperationTypeApply-3]
|
||||
}
|
||||
|
||||
const _OperationType_name = "OperationTypeInvalidOperationTypeRefreshOperationTypePlanOperationTypeApply"
|
||||
|
||||
var _OperationType_index = [...]uint8{0, 20, 40, 57, 75}
|
||||
|
|
|
@ -3,10 +3,11 @@ package artifactory
|
|||
import (
|
||||
"context"
|
||||
|
||||
cleanhttp "github.com/hashicorp/go-cleanhttp"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
artifactory "github.com/lusis/go-artifactory/src/artifactory.v401"
|
||||
)
|
||||
|
||||
|
@ -65,9 +66,10 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
subpath := data.Get("subpath").(string)
|
||||
|
||||
clientConf := &artifactory.ClientConfig{
|
||||
BaseURL: url,
|
||||
Username: userName,
|
||||
Password: password,
|
||||
BaseURL: url,
|
||||
Username: userName,
|
||||
Password: password,
|
||||
Transport: cleanhttp.DefaultPooledTransport(),
|
||||
}
|
||||
nativeClient := artifactory.NewClient(clientConf)
|
||||
|
||||
|
@ -90,7 +92,7 @@ func (b *Backend) DeleteWorkspace(string) error {
|
|||
return backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (state.State, error) {
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
if name != backend.DefaultStateName {
|
||||
return nil, backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
artifactory "github.com/lusis/go-artifactory/src/artifactory.v401"
|
||||
)
|
||||
|
||||
|
|
|
@ -5,7 +5,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
|
|
|
@ -4,18 +4,18 @@ import (
|
|||
"context"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
|
||||
armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
"github.com/Azure/go-autorest/autorest/adal"
|
||||
"github.com/Azure/go-autorest/autorest/azure"
|
||||
"github.com/hashicorp/go-azure-helpers/authentication"
|
||||
"github.com/hashicorp/go-azure-helpers/sender"
|
||||
"github.com/hashicorp/terraform/httpclient"
|
||||
)
|
||||
|
||||
|
@ -23,6 +23,8 @@ type ArmClient struct {
|
|||
// These Clients are only initialized if an Access Key isn't provided
|
||||
groupsClient *resources.GroupsClient
|
||||
storageAccountsClient *armStorage.AccountsClient
|
||||
containersClient *containers.Client
|
||||
blobsClient *blobs.Client
|
||||
|
||||
accessKey string
|
||||
environment azure.Environment
|
||||
|
@ -31,8 +33,8 @@ type ArmClient struct {
|
|||
sasToken string
|
||||
}
|
||||
|
||||
func buildArmClient(config BackendConfig) (*ArmClient, error) {
|
||||
env, err := buildArmEnvironment(config)
|
||||
func buildArmClient(ctx context.Context, config BackendConfig) (*ArmClient, error) {
|
||||
env, err := authentication.AzureEnvironmentByNameFromEndpoint(ctx, config.MetadataHost, config.Environment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -57,30 +59,40 @@ func buildArmClient(config BackendConfig) (*ArmClient, error) {
|
|||
|
||||
builder := authentication.Builder{
|
||||
ClientID: config.ClientID,
|
||||
ClientSecret: config.ClientSecret,
|
||||
SubscriptionID: config.SubscriptionID,
|
||||
TenantID: config.TenantID,
|
||||
CustomResourceManagerEndpoint: config.CustomResourceManagerEndpoint,
|
||||
MetadataURL: config.MetadataHost,
|
||||
Environment: config.Environment,
|
||||
MsiEndpoint: config.MsiEndpoint,
|
||||
ClientSecretDocsLink: "https://www.terraform.io/docs/providers/azurerm/guides/service_principal_client_secret.html",
|
||||
|
||||
// Service Principal (Client Certificate)
|
||||
ClientCertPassword: config.ClientCertificatePassword,
|
||||
ClientCertPath: config.ClientCertificatePath,
|
||||
|
||||
// Service Principal (Client Secret)
|
||||
ClientSecret: config.ClientSecret,
|
||||
|
||||
// Managed Service Identity
|
||||
MsiEndpoint: config.MsiEndpoint,
|
||||
|
||||
// Feature Toggles
|
||||
SupportsAzureCliToken: true,
|
||||
SupportsClientCertAuth: true,
|
||||
SupportsClientSecretAuth: true,
|
||||
SupportsManagedServiceIdentity: config.UseMsi,
|
||||
// TODO: support for Client Certificate auth
|
||||
}
|
||||
armConfig, err := builder.Build()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error building ARM Config: %+v", err)
|
||||
}
|
||||
|
||||
oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, armConfig.TenantID)
|
||||
oauthConfig, err := armConfig.BuildOAuthConfig(env.ActiveDirectoryEndpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
auth, err := armConfig.GetAuthorizationToken(oauthConfig, env.TokenAudience)
|
||||
auth, err := armConfig.GetAuthorizationToken(sender.BuildSender("backend/remote-state/azure"), oauthConfig, env.TokenAudience)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -106,49 +118,81 @@ func buildArmEnvironment(config BackendConfig) (*azure.Environment, error) {
|
|||
return authentication.DetermineEnvironment(config.Environment)
|
||||
}
|
||||
|
||||
func (c ArmClient) getBlobClient(ctx context.Context) (*storage.BlobStorageClient, error) {
|
||||
if c.accessKey != "" {
|
||||
log.Printf("[DEBUG] Building the Blob Client from an Access Token")
|
||||
storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, c.accessKey, c.environment)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err)
|
||||
}
|
||||
client := storageClient.GetBlobService()
|
||||
return &client, nil
|
||||
}
|
||||
|
||||
func (c ArmClient) getBlobClient(ctx context.Context) (*blobs.Client, error) {
|
||||
if c.sasToken != "" {
|
||||
log.Printf("[DEBUG] Building the Blob Client from a SAS Token")
|
||||
token := strings.TrimPrefix(c.sasToken, "?")
|
||||
uri, err := url.ParseQuery(token)
|
||||
storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error parsing SAS Token: %+v", err)
|
||||
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
|
||||
}
|
||||
|
||||
storageClient := storage.NewAccountSASClient(c.storageAccountName, uri, c.environment)
|
||||
client := storageClient.GetBlobService()
|
||||
return &client, nil
|
||||
blobsClient := blobs.NewWithEnvironment(c.environment)
|
||||
c.configureClient(&blobsClient.Client, storageAuth)
|
||||
return &blobsClient, nil
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Building the Blob Client from an Access Token (using user credentials)")
|
||||
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
|
||||
accessKey := c.accessKey
|
||||
if accessKey == "" {
|
||||
log.Printf("[DEBUG] Building the Blob Client from an Access Token (using user credentials)")
|
||||
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
|
||||
}
|
||||
|
||||
if keys.Keys == nil {
|
||||
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
|
||||
}
|
||||
|
||||
accessKeys := *keys.Keys
|
||||
accessKey = *accessKeys[0].Value
|
||||
}
|
||||
|
||||
storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
|
||||
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
|
||||
}
|
||||
|
||||
if keys.Keys == nil {
|
||||
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
|
||||
blobsClient := blobs.NewWithEnvironment(c.environment)
|
||||
c.configureClient(&blobsClient.Client, storageAuth)
|
||||
return &blobsClient, nil
|
||||
}
|
||||
|
||||
func (c ArmClient) getContainersClient(ctx context.Context) (*containers.Client, error) {
|
||||
if c.sasToken != "" {
|
||||
log.Printf("[DEBUG] Building the Container Client from a SAS Token")
|
||||
storageAuth, err := autorest.NewSASTokenAuthorizer(c.sasToken)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
|
||||
}
|
||||
|
||||
containersClient := containers.NewWithEnvironment(c.environment)
|
||||
c.configureClient(&containersClient.Client, storageAuth)
|
||||
return &containersClient, nil
|
||||
}
|
||||
accessKey := c.accessKey
|
||||
if accessKey == "" {
|
||||
log.Printf("[DEBUG] Building the Container Client from an Access Token (using user credentials)")
|
||||
keys, err := c.storageAccountsClient.ListKeys(ctx, c.resourceGroupName, c.storageAccountName)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error retrieving keys for Storage Account %q: %s", c.storageAccountName, err)
|
||||
}
|
||||
|
||||
if keys.Keys == nil {
|
||||
return nil, fmt.Errorf("Nil key returned for storage account %q", c.storageAccountName)
|
||||
}
|
||||
|
||||
accessKeys := *keys.Keys
|
||||
accessKey = *accessKeys[0].Value
|
||||
}
|
||||
|
||||
accessKeys := *keys.Keys
|
||||
accessKey := accessKeys[0].Value
|
||||
|
||||
storageClient, err := storage.NewBasicClientOnSovereignCloud(c.storageAccountName, *accessKey, c.environment)
|
||||
storageAuth, err := autorest.NewSharedKeyAuthorizer(c.storageAccountName, accessKey, autorest.SharedKey)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error creating storage client for storage account %q: %s", c.storageAccountName, err)
|
||||
return nil, fmt.Errorf("Error building Authorizer: %+v", err)
|
||||
}
|
||||
client := storageClient.GetBlobService()
|
||||
return &client, nil
|
||||
|
||||
containersClient := containers.NewWithEnvironment(c.environment)
|
||||
c.configureClient(&containersClient.Client, storageAuth)
|
||||
return &containersClient, nil
|
||||
}
|
||||
|
||||
func (c *ArmClient) configureClient(client *autorest.Client, auth autorest.Authorizer) {
|
||||
|
|
|
@ -30,6 +30,13 @@ func New() backend.Backend {
|
|||
Description: "The blob key.",
|
||||
},
|
||||
|
||||
"metadata_host": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_METADATA_HOST", ""),
|
||||
Description: "The Metadata URL which will be used to obtain the Cloud Environment.",
|
||||
},
|
||||
|
||||
"environment": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@ -51,6 +58,13 @@ func New() backend.Backend {
|
|||
DefaultFunc: schema.EnvDefaultFunc("ARM_SAS_TOKEN", ""),
|
||||
},
|
||||
|
||||
"snapshot": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Enable/Disable automatic blob snapshotting",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_SNAPSHOT", false),
|
||||
},
|
||||
|
||||
"resource_group_name": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@ -64,11 +78,11 @@ func New() backend.Backend {
|
|||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_ID", ""),
|
||||
},
|
||||
|
||||
"client_secret": {
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The Client Secret.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
|
||||
Description: "A custom Endpoint used to access the Azure Resource Manager API's.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""),
|
||||
},
|
||||
|
||||
"subscription_id": {
|
||||
|
@ -85,13 +99,35 @@ func New() backend.Backend {
|
|||
DefaultFunc: schema.EnvDefaultFunc("ARM_TENANT_ID", ""),
|
||||
},
|
||||
|
||||
// Service Principal (Client Certificate) specific
|
||||
"client_certificate_password": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The password associated with the Client Certificate specified in `client_certificate_path`",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PASSWORD", ""),
|
||||
},
|
||||
"client_certificate_path": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path to the PFX file used as the Client Certificate when authenticating as a Service Principal",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_CERTIFICATE_PATH", ""),
|
||||
},
|
||||
|
||||
// Service Principal (Client Secret) specific
|
||||
"client_secret": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The Client Secret.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_CLIENT_SECRET", ""),
|
||||
},
|
||||
|
||||
// Managed Service Identity specific
|
||||
"use_msi": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Should Managed Service Identity be used?.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_USE_MSI", false),
|
||||
},
|
||||
|
||||
"msi_endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
|
@ -99,13 +135,6 @@ func New() backend.Backend {
|
|||
DefaultFunc: schema.EnvDefaultFunc("ARM_MSI_ENDPOINT", ""),
|
||||
},
|
||||
|
||||
"endpoint": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "A custom Endpoint used to access the Azure Resource Manager API's.",
|
||||
DefaultFunc: schema.EnvDefaultFunc("ARM_ENDPOINT", ""),
|
||||
},
|
||||
|
||||
// Deprecated fields
|
||||
"arm_client_id": {
|
||||
Type: schema.TypeString,
|
||||
|
@ -149,6 +178,8 @@ type Backend struct {
|
|||
armClient *ArmClient
|
||||
containerName string
|
||||
keyName string
|
||||
accountName string
|
||||
snapshot bool
|
||||
}
|
||||
|
||||
type BackendConfig struct {
|
||||
|
@ -158,8 +189,11 @@ type BackendConfig struct {
|
|||
// Optional
|
||||
AccessKey string
|
||||
ClientID string
|
||||
ClientCertificatePassword string
|
||||
ClientCertificatePath string
|
||||
ClientSecret string
|
||||
CustomResourceManagerEndpoint string
|
||||
MetadataHost string
|
||||
Environment string
|
||||
MsiEndpoint string
|
||||
ResourceGroupName string
|
||||
|
@ -177,7 +211,9 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
// Grab the resource data
|
||||
data := schema.FromContextBackendConfig(ctx)
|
||||
b.containerName = data.Get("container_name").(string)
|
||||
b.accountName = data.Get("storage_account_name").(string)
|
||||
b.keyName = data.Get("key").(string)
|
||||
b.snapshot = data.Get("snapshot").(bool)
|
||||
|
||||
// support for previously deprecated fields
|
||||
clientId := valueFromDeprecatedField(data, "client_id", "arm_client_id")
|
||||
|
@ -188,8 +224,11 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
config := BackendConfig{
|
||||
AccessKey: data.Get("access_key").(string),
|
||||
ClientID: clientId,
|
||||
ClientCertificatePassword: data.Get("client_certificate_password").(string),
|
||||
ClientCertificatePath: data.Get("client_certificate_path").(string),
|
||||
ClientSecret: clientSecret,
|
||||
CustomResourceManagerEndpoint: data.Get("endpoint").(string),
|
||||
MetadataHost: data.Get("metadata_host").(string),
|
||||
Environment: data.Get("environment").(string),
|
||||
MsiEndpoint: data.Get("msi_endpoint").(string),
|
||||
ResourceGroupName: data.Get("resource_group_name").(string),
|
||||
|
@ -200,7 +239,7 @@ func (b *Backend) configure(ctx context.Context) error {
|
|||
UseMsi: data.Get("use_msi").(bool),
|
||||
}
|
||||
|
||||
armClient, err := buildArmClient(config)
|
||||
armClient, err := buildArmClient(context.TODO(), config)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -6,11 +6,12 @@ import (
|
|||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -21,23 +22,22 @@ const (
|
|||
|
||||
func (b *Backend) Workspaces() ([]string, error) {
|
||||
prefix := b.keyName + keyEnvPrefix
|
||||
params := storage.ListBlobsParameters{
|
||||
Prefix: prefix,
|
||||
params := containers.ListBlobsInput{
|
||||
Prefix: &prefix,
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
client, err := b.armClient.getBlobClient(ctx)
|
||||
client, err := b.armClient.getContainersClient(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
container := client.GetContainerReference(b.containerName)
|
||||
resp, err := container.ListBlobs(params)
|
||||
resp, err := client.ListBlobs(ctx, b.armClient.storageAccountName, b.containerName, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
envs := map[string]struct{}{}
|
||||
for _, obj := range resp.Blobs {
|
||||
for _, obj := range resp.Blobs.Blobs {
|
||||
key := obj.Name
|
||||
if strings.HasPrefix(key, prefix) {
|
||||
name := strings.TrimPrefix(key, prefix)
|
||||
|
@ -69,14 +69,16 @@ func (b *Backend) DeleteWorkspace(name string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
containerReference := client.GetContainerReference(b.containerName)
|
||||
blobReference := containerReference.GetBlobReference(b.path(name))
|
||||
options := &storage.DeleteBlobOptions{}
|
||||
if resp, err := client.Delete(ctx, b.armClient.storageAccountName, b.containerName, b.path(name), blobs.DeleteInput{}); err != nil {
|
||||
if resp.Response.StatusCode != 404 {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return blobReference.Delete(options)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (state.State, error) {
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
ctx := context.TODO()
|
||||
blobClient, err := b.armClient.getBlobClient(ctx)
|
||||
if err != nil {
|
||||
|
@ -84,9 +86,11 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
|
|||
}
|
||||
|
||||
client := &RemoteClient{
|
||||
blobClient: *blobClient,
|
||||
containerName: b.containerName,
|
||||
keyName: b.path(name),
|
||||
giovanniBlobClient: *blobClient,
|
||||
containerName: b.containerName,
|
||||
keyName: b.path(name),
|
||||
accountName: b.accountName,
|
||||
snapshot: b.snapshot,
|
||||
}
|
||||
|
||||
stateMgr := &remote.State{Client: client}
|
||||
|
@ -95,7 +99,7 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
|
|||
//it's listed by States.
|
||||
if name != backend.DefaultStateName {
|
||||
// take a lock on this state while we write it
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo := statemgr.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := client.Lock(lockInfo)
|
||||
if err != nil {
|
||||
|
|
|
@ -21,6 +21,7 @@ func TestBackendConfig(t *testing.T) {
|
|||
"storage_account_name": "tfaccount",
|
||||
"container_name": "tfcontainer",
|
||||
"key": "state",
|
||||
"snapshot": false,
|
||||
// Access Key must be Base64
|
||||
"access_key": "QUNDRVNTX0tFWQ0K",
|
||||
}
|
||||
|
@ -33,6 +34,9 @@ func TestBackendConfig(t *testing.T) {
|
|||
if b.keyName != "state" {
|
||||
t.Fatalf("Incorrect keyName was populated")
|
||||
}
|
||||
if b.snapshot != false {
|
||||
t.Fatalf("Incorrect snapshot was populated")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBackendAccessKeyBasic(t *testing.T) {
|
||||
|
@ -119,7 +123,44 @@ func TestBackendSASTokenBasic(t *testing.T) {
|
|||
backend.TestBackendStates(t, b)
|
||||
}
|
||||
|
||||
func TestBackendServicePrincipalBasic(t *testing.T) {
|
||||
func TestBackendServicePrincipalClientCertificateBasic(t *testing.T) {
|
||||
testAccAzureBackend(t)
|
||||
|
||||
clientCertPassword := os.Getenv("ARM_CLIENT_CERTIFICATE_PASSWORD")
|
||||
clientCertPath := os.Getenv("ARM_CLIENT_CERTIFICATE_PATH")
|
||||
if clientCertPath == "" {
|
||||
t.Skip("Skipping since `ARM_CLIENT_CERTIFICATE_PATH` is not specified!")
|
||||
}
|
||||
|
||||
rs := acctest.RandString(4)
|
||||
res := testResourceNames(rs, "testState")
|
||||
armClient := buildTestClient(t, res)
|
||||
|
||||
ctx := context.TODO()
|
||||
err := armClient.buildTestResources(ctx, &res)
|
||||
defer armClient.destroyTestResources(ctx, res)
|
||||
if err != nil {
|
||||
t.Fatalf("Error creating Test Resources: %q", err)
|
||||
}
|
||||
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"storage_account_name": res.storageAccountName,
|
||||
"container_name": res.storageContainerName,
|
||||
"key": res.storageKeyName,
|
||||
"resource_group_name": res.resourceGroup,
|
||||
"subscription_id": os.Getenv("ARM_SUBSCRIPTION_ID"),
|
||||
"tenant_id": os.Getenv("ARM_TENANT_ID"),
|
||||
"client_id": os.Getenv("ARM_CLIENT_ID"),
|
||||
"client_certificate_password": clientCertPassword,
|
||||
"client_certificate_path": clientCertPath,
|
||||
"environment": os.Getenv("ARM_ENVIRONMENT"),
|
||||
"endpoint": os.Getenv("ARM_ENDPOINT"),
|
||||
})).(*Backend)
|
||||
|
||||
backend.TestBackendStates(t, b)
|
||||
}
|
||||
|
||||
func TestBackendServicePrincipalClientSecretBasic(t *testing.T) {
|
||||
testAccAzureBackend(t)
|
||||
rs := acctest.RandString(4)
|
||||
res := testResourceNames(rs, "testState")
|
||||
|
@ -148,7 +189,7 @@ func TestBackendServicePrincipalBasic(t *testing.T) {
|
|||
backend.TestBackendStates(t, b)
|
||||
}
|
||||
|
||||
func TestBackendServicePrincipalCustomEndpoint(t *testing.T) {
|
||||
func TestBackendServicePrincipalClientSecretCustomEndpoint(t *testing.T) {
|
||||
testAccAzureBackend(t)
|
||||
|
||||
// this is only applicable for Azure Stack.
|
||||
|
|
|
@ -1,19 +1,18 @@
|
|||
package azure
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/go-uuid"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
|
||||
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -23,40 +22,31 @@ const (
|
|||
)
|
||||
|
||||
type RemoteClient struct {
|
||||
blobClient storage.BlobStorageClient
|
||||
containerName string
|
||||
keyName string
|
||||
leaseID string
|
||||
giovanniBlobClient blobs.Client
|
||||
accountName string
|
||||
containerName string
|
||||
keyName string
|
||||
leaseID string
|
||||
snapshot bool
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Get() (*remote.Payload, error) {
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
options := &storage.GetBlobOptions{}
|
||||
|
||||
options := blobs.GetInput{}
|
||||
if c.leaseID != "" {
|
||||
options.LeaseID = c.leaseID
|
||||
options.LeaseID = &c.leaseID
|
||||
}
|
||||
|
||||
blob, err := blobReference.Get(options)
|
||||
ctx := context.TODO()
|
||||
blob, err := c.giovanniBlobClient.Get(ctx, c.accountName, c.containerName, c.keyName, options)
|
||||
if err != nil {
|
||||
if storErr, ok := err.(storage.AzureStorageServiceError); ok {
|
||||
if storErr.Code == "BlobNotFound" {
|
||||
return nil, nil
|
||||
}
|
||||
if blob.Response.StatusCode == 404 {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer blob.Close()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
if _, err := io.Copy(buf, blob); err != nil {
|
||||
return nil, fmt.Errorf("Failed to read remote state: %s", err)
|
||||
}
|
||||
|
||||
payload := &remote.Payload{
|
||||
Data: buf.Bytes(),
|
||||
Data: blob.Contents,
|
||||
}
|
||||
|
||||
// If there was no data, then return nil
|
||||
|
@ -68,57 +58,65 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
|||
}
|
||||
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
getOptions := &storage.GetBlobMetadataOptions{}
|
||||
setOptions := &storage.SetBlobPropertiesOptions{}
|
||||
putOptions := &storage.PutBlobOptions{}
|
||||
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
|
||||
blobReference.Properties.ContentType = "application/json"
|
||||
blobReference.Properties.ContentLength = int64(len(data))
|
||||
getOptions := blobs.GetPropertiesInput{}
|
||||
setOptions := blobs.SetPropertiesInput{}
|
||||
putOptions := blobs.PutBlockBlobInput{}
|
||||
|
||||
options := blobs.GetInput{}
|
||||
if c.leaseID != "" {
|
||||
getOptions.LeaseID = c.leaseID
|
||||
setOptions.LeaseID = c.leaseID
|
||||
putOptions.LeaseID = c.leaseID
|
||||
options.LeaseID = &c.leaseID
|
||||
getOptions.LeaseID = &c.leaseID
|
||||
setOptions.LeaseID = &c.leaseID
|
||||
putOptions.LeaseID = &c.leaseID
|
||||
}
|
||||
|
||||
exists, err := blobReference.Exists()
|
||||
ctx := context.TODO()
|
||||
|
||||
if c.snapshot {
|
||||
snapshotInput := blobs.SnapshotInput{LeaseID: options.LeaseID}
|
||||
|
||||
log.Printf("[DEBUG] Snapshotting existing Blob %q (Container %q / Account %q)", c.keyName, c.containerName, c.accountName)
|
||||
if _, err := c.giovanniBlobClient.Snapshot(ctx, c.accountName, c.containerName, c.keyName, snapshotInput); err != nil {
|
||||
return fmt.Errorf("error snapshotting Blob %q (Container %q / Account %q): %+v", c.keyName, c.containerName, c.accountName, err)
|
||||
}
|
||||
|
||||
log.Print("[DEBUG] Created blob snapshot")
|
||||
}
|
||||
|
||||
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, getOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if exists {
|
||||
err = blobReference.GetMetadata(getOptions)
|
||||
if err != nil {
|
||||
if blob.StatusCode != 404 {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reader := bytes.NewReader(data)
|
||||
contentType := "application/json"
|
||||
putOptions.Content = &data
|
||||
putOptions.ContentType = &contentType
|
||||
putOptions.MetaData = blob.MetaData
|
||||
_, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putOptions)
|
||||
|
||||
err = blobReference.CreateBlockBlobFromReader(reader, putOptions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return blobReference.SetProperties(setOptions)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Delete() error {
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
options := &storage.DeleteBlobOptions{}
|
||||
options := blobs.DeleteInput{}
|
||||
|
||||
if c.leaseID != "" {
|
||||
options.LeaseID = c.leaseID
|
||||
options.LeaseID = &c.leaseID
|
||||
}
|
||||
|
||||
return blobReference.Delete(options)
|
||||
ctx := context.TODO()
|
||||
resp, err := c.giovanniBlobClient.Delete(ctx, c.accountName, c.containerName, c.keyName, options)
|
||||
if err != nil {
|
||||
if resp.Response.StatusCode != 404 {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
||||
func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
|
||||
stateName := fmt.Sprintf("%s/%s", c.containerName, c.keyName)
|
||||
info.Path = stateName
|
||||
|
||||
|
@ -137,47 +135,50 @@ func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
|||
err = multierror.Append(err, infoErr)
|
||||
}
|
||||
|
||||
return &state.LockError{
|
||||
return &statemgr.LockError{
|
||||
Err: err,
|
||||
Info: lockInfo,
|
||||
}
|
||||
}
|
||||
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
leaseID, err := blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{})
|
||||
leaseOptions := blobs.AcquireLeaseInput{
|
||||
ProposedLeaseID: &info.ID,
|
||||
LeaseDuration: -1,
|
||||
}
|
||||
ctx := context.TODO()
|
||||
|
||||
// obtain properties to see if the blob lease is already in use. If the blob doesn't exist, create it
|
||||
properties, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{})
|
||||
if err != nil {
|
||||
if storErr, ok := err.(storage.AzureStorageServiceError); ok && storErr.Code != "BlobNotFound" {
|
||||
// error if we had issues getting the blob
|
||||
if properties.Response.StatusCode != 404 {
|
||||
return "", getLockInfoErr(err)
|
||||
}
|
||||
// if we don't find the blob, we need to build it
|
||||
|
||||
// failed to lock as there was no state blob, write empty state
|
||||
stateMgr := &remote.State{Client: c}
|
||||
|
||||
// ensure state is actually empty
|
||||
if err := stateMgr.RefreshState(); err != nil {
|
||||
return "", fmt.Errorf("Failed to refresh state before writing empty state for locking: %s", err)
|
||||
contentType := "application/json"
|
||||
putGOptions := blobs.PutBlockBlobInput{
|
||||
ContentType: &contentType,
|
||||
}
|
||||
|
||||
log.Print("[DEBUG] Could not lock as state blob did not exist, creating with empty state")
|
||||
|
||||
if v := stateMgr.State(); v == nil {
|
||||
if err := stateMgr.WriteState(states.NewState()); err != nil {
|
||||
return "", fmt.Errorf("Failed to write empty state for locking: %s", err)
|
||||
}
|
||||
if err := stateMgr.PersistState(); err != nil {
|
||||
return "", fmt.Errorf("Failed to persist empty state for locking: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
leaseID, err = blobReference.AcquireLease(-1, info.ID, &storage.LeaseOptions{})
|
||||
_, err = c.giovanniBlobClient.PutBlockBlob(ctx, c.accountName, c.containerName, c.keyName, putGOptions)
|
||||
if err != nil {
|
||||
return "", getLockInfoErr(err)
|
||||
}
|
||||
}
|
||||
|
||||
info.ID = leaseID
|
||||
c.leaseID = leaseID
|
||||
// if the blob is already locked then error
|
||||
if properties.LeaseStatus == blobs.Locked {
|
||||
return "", getLockInfoErr(fmt.Errorf("state blob is already locked"))
|
||||
}
|
||||
|
||||
leaseID, err := c.giovanniBlobClient.AcquireLease(ctx, c.accountName, c.containerName, c.keyName, leaseOptions)
|
||||
if err != nil {
|
||||
return "", getLockInfoErr(err)
|
||||
}
|
||||
|
||||
info.ID = leaseID.LeaseID
|
||||
c.leaseID = leaseID.LeaseID
|
||||
|
||||
if err := c.writeLockInfo(info); err != nil {
|
||||
return "", err
|
||||
|
@ -186,15 +187,19 @@ func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
|||
return info.ID, nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
|
||||
func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
|
||||
options := blobs.GetPropertiesInput{}
|
||||
if c.leaseID != "" {
|
||||
options.LeaseID = &c.leaseID
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, options)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
raw := blobReference.Metadata[lockInfoMetaKey]
|
||||
raw := blob.MetaData[lockInfoMetaKey]
|
||||
if raw == "" {
|
||||
return nil, fmt.Errorf("blob metadata %q was empty", lockInfoMetaKey)
|
||||
}
|
||||
|
@ -204,7 +209,7 @@ func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
|||
return nil, err
|
||||
}
|
||||
|
||||
lockInfo := &state.LockInfo{}
|
||||
lockInfo := &statemgr.LockInfo{}
|
||||
err = json.Unmarshal(data, lockInfo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -214,31 +219,34 @@ func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
|||
}
|
||||
|
||||
// writes info to blob meta data, deletes metadata entry if info is nil
|
||||
func (c *RemoteClient) writeLockInfo(info *state.LockInfo) error {
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
err := blobReference.GetMetadata(&storage.GetBlobMetadataOptions{
|
||||
LeaseID: c.leaseID,
|
||||
})
|
||||
func (c *RemoteClient) writeLockInfo(info *statemgr.LockInfo) error {
|
||||
ctx := context.TODO()
|
||||
blob, err := c.giovanniBlobClient.GetProperties(ctx, c.accountName, c.containerName, c.keyName, blobs.GetPropertiesInput{LeaseID: &c.leaseID})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if info == nil {
|
||||
delete(blobReference.Metadata, lockInfoMetaKey)
|
||||
delete(blob.MetaData, lockInfoMetaKey)
|
||||
} else {
|
||||
value := base64.StdEncoding.EncodeToString(info.Marshal())
|
||||
blobReference.Metadata[lockInfoMetaKey] = value
|
||||
blob.MetaData[lockInfoMetaKey] = value
|
||||
}
|
||||
|
||||
opts := &storage.SetBlobMetadataOptions{
|
||||
LeaseID: c.leaseID,
|
||||
opts := blobs.SetMetaDataInput{
|
||||
LeaseID: &c.leaseID,
|
||||
MetaData: blob.MetaData,
|
||||
}
|
||||
return blobReference.SetMetadata(opts)
|
||||
|
||||
_, err = c.giovanniBlobClient.SetMetaData(ctx, c.accountName, c.containerName, c.keyName, opts)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Unlock(id string) error {
|
||||
lockErr := &state.LockError{}
|
||||
lockErr := &statemgr.LockError{}
|
||||
|
||||
lockInfo, err := c.getLockInfo()
|
||||
if err != nil {
|
||||
|
@ -258,9 +266,8 @@ func (c *RemoteClient) Unlock(id string) error {
|
|||
return lockErr
|
||||
}
|
||||
|
||||
containerReference := c.blobClient.GetContainerReference(c.containerName)
|
||||
blobReference := containerReference.GetBlobReference(c.keyName)
|
||||
err = blobReference.ReleaseLease(id, &storage.LeaseOptions{})
|
||||
ctx := context.TODO()
|
||||
_, err = c.giovanniBlobClient.ReleaseLease(ctx, c.accountName, c.containerName, c.keyName, id)
|
||||
if err != nil {
|
||||
lockErr.Err = err
|
||||
return lockErr
|
||||
|
|
|
@ -5,10 +5,10 @@ import (
|
|||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/acctest"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/blobs"
|
||||
)
|
||||
|
||||
func TestRemoteClient_impl(t *testing.T) {
|
||||
|
@ -264,21 +264,22 @@ func TestPutMaintainsMetaData(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatalf("Error building Blob Client: %+v", err)
|
||||
}
|
||||
containerReference := client.GetContainerReference(res.storageContainerName)
|
||||
blobReference := containerReference.GetBlobReference(res.storageKeyName)
|
||||
|
||||
err = blobReference.CreateBlockBlob(&storage.PutBlobOptions{})
|
||||
_, err = client.PutBlockBlob(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.PutBlockBlobInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error Creating Block Blob: %+v", err)
|
||||
}
|
||||
|
||||
err = blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
|
||||
blobReference, err := client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading MetaData: %+v", err)
|
||||
}
|
||||
|
||||
blobReference.Metadata[headerName] = expectedValue
|
||||
err = blobReference.SetMetadata(&storage.SetBlobMetadataOptions{})
|
||||
blobReference.MetaData[headerName] = expectedValue
|
||||
opts := blobs.SetMetaDataInput{
|
||||
MetaData: blobReference.MetaData,
|
||||
}
|
||||
_, err = client.SetMetaData(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, opts)
|
||||
if err != nil {
|
||||
t.Fatalf("Error setting MetaData: %+v", err)
|
||||
}
|
||||
|
@ -287,8 +288,9 @@ func TestPutMaintainsMetaData(t *testing.T) {
|
|||
remoteClient := RemoteClient{
|
||||
keyName: res.storageKeyName,
|
||||
containerName: res.storageContainerName,
|
||||
accountName: res.storageAccountName,
|
||||
|
||||
blobClient: *client,
|
||||
giovanniBlobClient: *client,
|
||||
}
|
||||
|
||||
bytes := []byte(acctest.RandString(20))
|
||||
|
@ -298,12 +300,12 @@ func TestPutMaintainsMetaData(t *testing.T) {
|
|||
}
|
||||
|
||||
// Verify it still exists
|
||||
err = blobReference.GetMetadata(&storage.GetBlobMetadataOptions{})
|
||||
blobReference, err = client.GetProperties(ctx, res.storageAccountName, res.storageContainerName, res.storageKeyName, blobs.GetPropertiesInput{})
|
||||
if err != nil {
|
||||
t.Fatalf("Error loading MetaData: %+v", err)
|
||||
}
|
||||
|
||||
if blobReference.Metadata[headerName] != expectedValue {
|
||||
t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.Metadata)
|
||||
if blobReference.MetaData[headerName] != expectedValue {
|
||||
t.Fatalf("%q was not set to %q in the MetaData: %+v", headerName, expectedValue, blobReference.MetaData)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -11,8 +11,9 @@ import (
|
|||
|
||||
"github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/resources/mgmt/resources"
|
||||
armStorage "github.com/Azure/azure-sdk-for-go/profiles/2017-03-09/storage/mgmt/storage"
|
||||
"github.com/Azure/azure-sdk-for-go/storage"
|
||||
"github.com/Azure/go-autorest/autorest"
|
||||
sasStorage "github.com/hashicorp/go-azure-helpers/storage"
|
||||
"github.com/tombuildsstuff/giovanni/storage/2018-11-09/blob/containers"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -72,7 +73,7 @@ func buildTestClient(t *testing.T, res resourceNames) *ArmClient {
|
|||
// Endpoint is optional (only for Stack)
|
||||
endpoint := os.Getenv("ARM_ENDPOINT")
|
||||
|
||||
armClient, err := buildArmClient(BackendConfig{
|
||||
armClient, err := buildArmClient(context.TODO(), BackendConfig{
|
||||
SubscriptionID: subscriptionID,
|
||||
TenantID: tenantID,
|
||||
ClientID: clientID,
|
||||
|
@ -108,7 +109,7 @@ func buildSasToken(accountName, accessKey string) (*string, error) {
|
|||
startDate := utcNow.Add(time.Minute * -5).Format(time.RFC3339)
|
||||
endDate := utcNow.Add(time.Hour * 24).Format(time.RFC3339)
|
||||
|
||||
sasToken, err := sasStorage.ComputeSASToken(accountName, accessKey, permissions, services, resourceTypes,
|
||||
sasToken, err := sasStorage.ComputeAccountSASToken(accountName, accessKey, permissions, services, resourceTypes,
|
||||
startDate, endDate, signedProtocol, signedIp, signedVersion)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Error computing SAS Token: %+v", err)
|
||||
|
@ -128,7 +129,7 @@ type resourceNames struct {
|
|||
|
||||
func testResourceNames(rString string, keyName string) resourceNames {
|
||||
return resourceNames{
|
||||
resourceGroup: fmt.Sprintf("acctestrg-backend-%s", rString),
|
||||
resourceGroup: fmt.Sprintf("acctestRG-backend-%s-%s", strings.Replace(time.Now().Local().Format("060102150405.00"), ".", "", 1), rString),
|
||||
location: os.Getenv("ARM_LOCATION"),
|
||||
storageAccountName: fmt.Sprintf("acctestsa%s", rString),
|
||||
storageContainerName: "acctestcont",
|
||||
|
@ -170,15 +171,16 @@ func (c *ArmClient) buildTestResources(ctx context.Context, names *resourceNames
|
|||
accessKey := *keys[0].Value
|
||||
names.storageAccountAccessKey = accessKey
|
||||
|
||||
storageClient, err := storage.NewBasicClientOnSovereignCloud(names.storageAccountName, accessKey, c.environment)
|
||||
storageAuth, err := autorest.NewSharedKeyAuthorizer(names.storageAccountName, accessKey, autorest.SharedKey)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to list storage account keys %s:", err)
|
||||
return fmt.Errorf("Error building Authorizer: %+v", err)
|
||||
}
|
||||
|
||||
containersClient := containers.NewWithEnvironment(c.environment)
|
||||
containersClient.Client.Authorizer = storageAuth
|
||||
|
||||
log.Printf("Creating Container %q in Storage Account %q (Resource Group %q)", names.storageContainerName, names.storageAccountName, names.resourceGroup)
|
||||
blobService := storageClient.GetBlobService()
|
||||
container := blobService.GetContainerReference(names.storageContainerName)
|
||||
err = container.Create(&storage.CreateContainerOptions{})
|
||||
_, err = containersClient.Create(ctx, names.storageAccountName, names.storageContainerName, containers.CreateInput{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create storage container: %s", err)
|
||||
}
|
||||
|
|
|
@ -21,7 +21,7 @@ func withRequestLogging() autorest.SendDecorator {
|
|||
return func(s autorest.Sender) autorest.Sender {
|
||||
return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) {
|
||||
// only log if logging's enabled
|
||||
logLevel := logging.LogLevel()
|
||||
logLevel := logging.CurrentLogLevel()
|
||||
if logLevel == "" {
|
||||
return s.Do(r)
|
||||
}
|
||||
|
|
|
@ -1,72 +0,0 @@
|
|||
// Package remotestate implements a Backend for remote state implementations
|
||||
// from the state/remote package that also implement a backend schema for
|
||||
// configuration.
|
||||
package remotestate
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/hashicorp/terraform/tfdiags"
|
||||
)
|
||||
|
||||
// Backend implements backend.Backend for remote state backends.
|
||||
//
|
||||
// All exported fields should be set. This struct should only be used
|
||||
// by implementers of backends, not by consumers. If you're consuming, please
|
||||
// use a higher level package such as Consul backends.
|
||||
type Backend struct {
|
||||
// Backend should be set to the configuration schema. ConfigureFunc
|
||||
// should not be set on the schema.
|
||||
*schema.Backend
|
||||
|
||||
// ConfigureFunc takes the ctx from a schema.Backend and returns a
|
||||
// fully configured remote client to use for state operations.
|
||||
ConfigureFunc func(ctx context.Context) (remote.Client, error)
|
||||
|
||||
client remote.Client
|
||||
}
|
||||
|
||||
func (b *Backend) Configure(obj cty.Value) tfdiags.Diagnostics {
|
||||
|
||||
// Set our configureFunc manually
|
||||
b.Backend.ConfigureFunc = func(ctx context.Context) error {
|
||||
c, err := b.ConfigureFunc(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Set the client for later
|
||||
b.client = c
|
||||
return nil
|
||||
}
|
||||
|
||||
return b.Backend.Configure(obj)
|
||||
}
|
||||
|
||||
func (b *Backend) Workspaces() ([]string, error) {
|
||||
return nil, backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) DeleteWorkspace(name string) error {
|
||||
return backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
// This shouldn't happen
|
||||
if b.client == nil {
|
||||
panic("nil remote client")
|
||||
}
|
||||
|
||||
if name != backend.DefaultStateName {
|
||||
return nil, backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
s := &remote.State{Client: b.client}
|
||||
return s, nil
|
||||
}
|
|
@ -1,11 +0,0 @@
|
|||
package remotestate
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
)
|
||||
|
||||
func TestBackend_impl(t *testing.T) {
|
||||
var _ backend.Backend = new(Backend)
|
||||
}
|
|
@ -5,9 +5,8 @@ import (
|
|||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
|
@ -93,7 +92,7 @@ func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
|||
// Grab a lock, we use this to write an empty state if one doesn't
|
||||
// exist already. We have to write an empty state as a sentinel value
|
||||
// so States() knows it exists.
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo := statemgr.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := stateMgr.Lock(lockInfo)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
|
@ -39,6 +40,10 @@ func newConsulTestServer() (*testutil.TestServer, error) {
|
|||
srv, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
|
||||
c.LogLevel = "warn"
|
||||
|
||||
if !flag.Parsed() {
|
||||
flag.Parse()
|
||||
}
|
||||
|
||||
if !testing.Verbose() {
|
||||
c.Stdout = ioutil.Discard
|
||||
c.Stderr = ioutil.Discard
|
||||
|
|
|
@ -9,13 +9,14 @@ import (
|
|||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
consulapi "github.com/hashicorp/consul/api"
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
const (
|
||||
|
@ -54,7 +55,7 @@ type RemoteClient struct {
|
|||
consulLock *consulapi.Lock
|
||||
lockCh <-chan struct{}
|
||||
|
||||
info *state.LockInfo
|
||||
info *statemgr.LockInfo
|
||||
|
||||
// cancel our goroutine which is monitoring the lock to automatically
|
||||
// reacquire it when possible.
|
||||
|
@ -71,7 +72,9 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
|||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
pair, _, err := c.Client.KV().Get(c.Path, nil)
|
||||
kv := c.Client.KV()
|
||||
|
||||
chunked, hash, chunks, pair, err := c.chunkedMode()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -81,17 +84,36 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
|||
|
||||
c.modifyIndex = pair.ModifyIndex
|
||||
|
||||
payload := pair.Value
|
||||
var payload []byte
|
||||
if chunked {
|
||||
for _, c := range chunks {
|
||||
pair, _, err := kv.Get(c, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if pair == nil {
|
||||
return nil, fmt.Errorf("Key %q could not be found", c)
|
||||
}
|
||||
payload = append(payload, pair.Value[:]...)
|
||||
}
|
||||
} else {
|
||||
payload = pair.Value
|
||||
}
|
||||
|
||||
// If the payload starts with 0x1f, it's gzip, not json
|
||||
if len(pair.Value) >= 1 && pair.Value[0] == '\x1f' {
|
||||
if data, err := uncompressState(pair.Value); err == nil {
|
||||
payload = data
|
||||
} else {
|
||||
if len(payload) >= 1 && payload[0] == '\x1f' {
|
||||
payload, err = uncompressState(payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
md5 := md5.Sum(pair.Value)
|
||||
md5 := md5.Sum(payload)
|
||||
|
||||
if hash != "" && fmt.Sprintf("%x", md5) != hash {
|
||||
return nil, fmt.Errorf("The remote state does not match the expected hash")
|
||||
}
|
||||
|
||||
return &remote.Payload{
|
||||
Data: payload,
|
||||
MD5: md5[:],
|
||||
|
@ -99,9 +121,65 @@ func (c *RemoteClient) Get() (*remote.Payload, error) {
|
|||
}
|
||||
|
||||
func (c *RemoteClient) Put(data []byte) error {
|
||||
// The state can be stored in 4 different ways, based on the payload size
|
||||
// and whether the user enabled gzip:
|
||||
// - single entry mode with plain JSON: a single JSON is stored at
|
||||
// "tfstate/my_project"
|
||||
// - single entry mode gzip: the JSON payload is first gziped and stored at
|
||||
// "tfstate/my_project"
|
||||
// - chunked mode with plain JSON: the JSON payload is split in pieces and
|
||||
// stored like so:
|
||||
// - "tfstate/my_project" -> a JSON payload that contains the path of
|
||||
// the chunks and an MD5 sum like so:
|
||||
// {
|
||||
// "current-hash": "abcdef1234",
|
||||
// "chunks": [
|
||||
// "tfstate/my_project/tfstate.abcdef1234/0",
|
||||
// "tfstate/my_project/tfstate.abcdef1234/1",
|
||||
// "tfstate/my_project/tfstate.abcdef1234/2",
|
||||
// ]
|
||||
// }
|
||||
// - "tfstate/my_project/tfstate.abcdef1234/0" -> The first chunk
|
||||
// - "tfstate/my_project/tfstate.abcdef1234/1" -> The next one
|
||||
// - ...
|
||||
// - chunked mode with gzip: the same system but we gziped the JSON payload
|
||||
// before splitting it in chunks
|
||||
//
|
||||
// When overwritting the current state, we need to clean the old chunks if
|
||||
// we were in chunked mode (no matter whether we need to use chunks for the
|
||||
// new one). To do so based on the 4 possibilities above we look at the
|
||||
// value at "tfstate/my_project" and if it is:
|
||||
// - absent then it's a new state and there will be nothing to cleanup,
|
||||
// - not a JSON payload we were in single entry mode with gzip so there will
|
||||
// be nothing to cleanup
|
||||
// - a JSON payload, then we were either single entry mode with plain JSON
|
||||
// or in chunked mode. To differentiate between the two we look whether a
|
||||
// "current-hash" key is present in the payload. If we find one we were
|
||||
// in chunked mode and we will need to remove the old chunks (whether or
|
||||
// not we were using gzip does not matter in that case).
|
||||
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
kv := c.Client.KV()
|
||||
|
||||
// First we determine what mode we were using and to prepare the cleanup
|
||||
chunked, hash, _, _, err := c.chunkedMode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
cleanupOldChunks := func() {}
|
||||
if chunked {
|
||||
cleanupOldChunks = func() {
|
||||
// We ignore all errors that can happen here because we already
|
||||
// saved the new state and there is no way to return a warning to
|
||||
// the user. We may end up with dangling chunks but there is no way
|
||||
// to be sure we won't.
|
||||
path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash)
|
||||
kv.DeleteTree(path, nil)
|
||||
}
|
||||
}
|
||||
|
||||
payload := data
|
||||
if c.GZip {
|
||||
if compressedState, err := compressState(data); err == nil {
|
||||
|
@ -111,8 +189,6 @@ func (c *RemoteClient) Put(data []byte) error {
|
|||
}
|
||||
}
|
||||
|
||||
kv := c.Client.KV()
|
||||
|
||||
// default to doing a CAS
|
||||
verb := consulapi.KVCAS
|
||||
|
||||
|
@ -122,9 +198,44 @@ func (c *RemoteClient) Put(data []byte) error {
|
|||
verb = consulapi.KVSet
|
||||
}
|
||||
|
||||
// If the payload is too large we first write the chunks and replace it
|
||||
// 524288 is the default value, we just hope the user did not set a smaller
|
||||
// one but there is really no reason for them to do so, if they changed it
|
||||
// it is certainly to set a larger value.
|
||||
limit := 524288
|
||||
if len(payload) > limit {
|
||||
md5 := md5.Sum(data)
|
||||
chunks := split(payload, limit)
|
||||
chunkPaths := make([]string, 0)
|
||||
|
||||
// First we write the new chunks
|
||||
for i, p := range chunks {
|
||||
path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%x/%d", md5, i)
|
||||
chunkPaths = append(chunkPaths, path)
|
||||
_, err := kv.Put(&consulapi.KVPair{
|
||||
Key: path,
|
||||
Value: p,
|
||||
}, nil)
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// We update the link to point to the new chunks
|
||||
payload, err = json.Marshal(map[string]interface{}{
|
||||
"current-hash": fmt.Sprintf("%x", md5),
|
||||
"chunks": chunkPaths,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var txOps consulapi.KVTxnOps
|
||||
// KV.Put doesn't return the new index, so we use a single operation
|
||||
// transaction to get the new index with a single request.
|
||||
txOps := consulapi.KVTxnOps{
|
||||
txOps = consulapi.KVTxnOps{
|
||||
&consulapi.KVTxnOp{
|
||||
Verb: verb,
|
||||
Key: c.Path,
|
||||
|
@ -137,7 +248,6 @@ func (c *RemoteClient) Put(data []byte) error {
|
|||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// transaction was rolled back
|
||||
if !ok {
|
||||
return fmt.Errorf("consul CAS failed with transaction errors: %v", resp.Errors)
|
||||
|
@ -149,6 +259,10 @@ func (c *RemoteClient) Put(data []byte) error {
|
|||
}
|
||||
|
||||
c.modifyIndex = resp.Results[0].ModifyIndex
|
||||
|
||||
// We remove all the old chunks
|
||||
cleanupOldChunks()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -157,25 +271,44 @@ func (c *RemoteClient) Delete() error {
|
|||
defer c.mu.Unlock()
|
||||
|
||||
kv := c.Client.KV()
|
||||
_, err := kv.Delete(c.Path, nil)
|
||||
|
||||
chunked, hash, _, _, err := c.chunkedMode()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = kv.Delete(c.Path, nil)
|
||||
|
||||
// If there were chunks we need to remove them
|
||||
if chunked {
|
||||
path := strings.TrimRight(c.Path, "/") + fmt.Sprintf("/tfstate.%s/", hash)
|
||||
kv.DeleteTree(path, nil)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) putLockInfo(info *state.LockInfo) error {
|
||||
func (c *RemoteClient) lockPath() string {
|
||||
// we sanitize the path for the lock as Consul does not like having
|
||||
// two consecutive slashes for the lock path
|
||||
return strings.TrimRight(c.Path, "/")
|
||||
}
|
||||
|
||||
func (c *RemoteClient) putLockInfo(info *statemgr.LockInfo) error {
|
||||
info.Path = c.Path
|
||||
info.Created = time.Now().UTC()
|
||||
|
||||
kv := c.Client.KV()
|
||||
_, err := kv.Put(&consulapi.KVPair{
|
||||
Key: c.Path + lockInfoSuffix,
|
||||
Key: c.lockPath() + lockInfoSuffix,
|
||||
Value: info.Marshal(),
|
||||
}, nil)
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
||||
path := c.Path + lockInfoSuffix
|
||||
func (c *RemoteClient) getLockInfo() (*statemgr.LockInfo, error) {
|
||||
path := c.lockPath() + lockInfoSuffix
|
||||
pair, _, err := c.Client.KV().Get(path, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -184,7 +317,7 @@ func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
|||
return nil, nil
|
||||
}
|
||||
|
||||
li := &state.LockInfo{}
|
||||
li := &statemgr.LockInfo{}
|
||||
err = json.Unmarshal(pair.Value, li)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error unmarshaling lock info: %s", err)
|
||||
|
@ -193,7 +326,7 @@ func (c *RemoteClient) getLockInfo() (*state.LockInfo, error) {
|
|||
return li, nil
|
||||
}
|
||||
|
||||
func (c *RemoteClient) Lock(info *state.LockInfo) (string, error) {
|
||||
func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
|
||||
|
@ -233,8 +366,13 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
// store the session ID for correlation with consul logs
|
||||
c.info.Info = "consul session: " + lockSession
|
||||
|
||||
// A random lock ID has been generated but we override it with the session
|
||||
// ID as this will make it easier to manually invalidate the session
|
||||
// if needed.
|
||||
c.info.ID = lockSession
|
||||
|
||||
opts := &consulapi.LockOptions{
|
||||
Key: c.Path + lockSuffix,
|
||||
Key: c.lockPath() + lockSuffix,
|
||||
Session: lockSession,
|
||||
|
||||
// only wait briefly, so terraform has the choice to fail fast or
|
||||
|
@ -260,7 +398,7 @@ func (c *RemoteClient) lock() (string, error) {
|
|||
return "", err
|
||||
}
|
||||
|
||||
lockErr := &state.LockError{}
|
||||
lockErr := &statemgr.LockError{}
|
||||
|
||||
lockCh, err := c.consulLock.Lock(make(chan struct{}))
|
||||
if err != nil {
|
||||
|
@ -391,8 +529,25 @@ func (c *RemoteClient) Unlock(id string) error {
|
|||
// the unlock implementation.
|
||||
// Only to be called while holding Client.mu
|
||||
func (c *RemoteClient) unlock(id string) error {
|
||||
// this doesn't use the lock id, because the lock is tied to the consul client.
|
||||
// This method can be called in two circumstances:
|
||||
// - when the plan apply or destroy operation finishes and the lock needs to be released,
|
||||
// the watchdog stopped and the session closed
|
||||
// - when the user calls `terraform force-unlock <lock_id>` in which case
|
||||
// we only need to release the lock.
|
||||
|
||||
if c.consulLock == nil || c.lockCh == nil {
|
||||
// The user called `terraform force-unlock <lock_id>`, we just destroy
|
||||
// the session which will release the lock, clean the KV store and quit.
|
||||
|
||||
_, err := c.Client.Session().Destroy(id, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// We ignore the errors that may happen during cleanup
|
||||
kv := c.Client.KV()
|
||||
kv.Delete(c.lockPath()+lockSuffix, nil)
|
||||
kv.Delete(c.lockPath()+lockInfoSuffix, nil)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -419,7 +574,7 @@ func (c *RemoteClient) unlock(id string) error {
|
|||
|
||||
var errs error
|
||||
|
||||
if _, err := kv.Delete(c.Path+lockInfoSuffix, nil); err != nil {
|
||||
if _, err := kv.Delete(c.lockPath()+lockInfoSuffix, nil); err != nil {
|
||||
errs = multierror.Append(errs, err)
|
||||
}
|
||||
|
||||
|
@ -466,3 +621,42 @@ func uncompressState(data []byte) ([]byte, error) {
|
|||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func split(payload []byte, limit int) [][]byte {
|
||||
var chunk []byte
|
||||
chunks := make([][]byte, 0, len(payload)/limit+1)
|
||||
for len(payload) >= limit {
|
||||
chunk, payload = payload[:limit], payload[limit:]
|
||||
chunks = append(chunks, chunk)
|
||||
}
|
||||
if len(payload) > 0 {
|
||||
chunks = append(chunks, payload[:])
|
||||
}
|
||||
return chunks
|
||||
}
|
||||
|
||||
func (c *RemoteClient) chunkedMode() (bool, string, []string, *consulapi.KVPair, error) {
|
||||
kv := c.Client.KV()
|
||||
pair, _, err := kv.Get(c.Path, nil)
|
||||
if err != nil {
|
||||
return false, "", nil, pair, err
|
||||
}
|
||||
if pair != nil {
|
||||
var d map[string]interface{}
|
||||
err = json.Unmarshal(pair.Value, &d)
|
||||
// If there is an error when unmarshaling the payload, the state has
|
||||
// probably been gziped in single entry mode.
|
||||
if err == nil {
|
||||
// If we find the "current-hash" key we were in chunked mode
|
||||
hash, ok := d["current-hash"]
|
||||
if ok {
|
||||
chunks := make([]string, 0)
|
||||
for _, c := range d["chunks"].([]interface{}) {
|
||||
chunks = append(chunks, c.(string))
|
||||
}
|
||||
return true, hash.(string), chunks, pair, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return false, "", nil, pair, nil
|
||||
}
|
||||
|
|
|
@ -1,16 +1,21 @@
|
|||
package consul
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
func TestRemoteClient_impl(t *testing.T) {
|
||||
|
@ -19,20 +24,29 @@ func TestRemoteClient_impl(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
}))
|
||||
|
||||
// Grab the client
|
||||
state, err := b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
testCases := []string{
|
||||
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
for _, path := range testCases {
|
||||
t.Run(path, func(*testing.T) {
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}))
|
||||
|
||||
// Grab the client
|
||||
state, err := b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
// Test
|
||||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// test the gzip functionality of the client
|
||||
|
@ -71,63 +85,247 @@ func TestRemoteClient_gzipUpgrade(t *testing.T) {
|
|||
remote.TestClient(t, state.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
path := fmt.Sprintf("tf-unit/%s", time.Now().String())
|
||||
// TestConsul_largeState tries to write a large payload using the Consul state
|
||||
// manager, as there is a limit to the size of the values in the KV store it
|
||||
// will need to be split up before being saved and put back together when read.
|
||||
func TestConsul_largeState(t *testing.T) {
|
||||
path := "tf-unit/test-large-state"
|
||||
|
||||
// create 2 instances to get 2 remote.Clients
|
||||
sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})).StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})).StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client)
|
||||
}
|
||||
|
||||
func TestConsul_destroyLock(t *testing.T) {
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
"path": path,
|
||||
}))
|
||||
|
||||
// Grab the client
|
||||
s, err := b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
c := s.(*remote.State).Client.(*RemoteClient)
|
||||
c.Path = path
|
||||
|
||||
info := state.NewLockInfo()
|
||||
id, err := c.Lock(info)
|
||||
// testPaths fails the test if the keys found at the prefix don't match
|
||||
// what is expected
|
||||
testPaths := func(t *testing.T, expected []string) {
|
||||
kv := c.Client.KV()
|
||||
pairs, _, err := kv.List(c.Path, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
res := make([]string, 0)
|
||||
for _, p := range pairs {
|
||||
res = append(res, p.Key)
|
||||
}
|
||||
if !reflect.DeepEqual(res, expected) {
|
||||
t.Fatalf("Wrong keys: %#v", res)
|
||||
}
|
||||
}
|
||||
|
||||
testPayload := func(t *testing.T, data map[string]string, keys []string) {
|
||||
payload, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = c.Put(payload)
|
||||
if err != nil {
|
||||
t.Fatal("could not put payload", err)
|
||||
}
|
||||
|
||||
remote, err := c.Get()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// md5 := md5.Sum(payload)
|
||||
// if !bytes.Equal(md5[:], remote.MD5) {
|
||||
// t.Fatal("the md5 sums do not match")
|
||||
// }
|
||||
|
||||
if !bytes.Equal(payload, remote.Data) {
|
||||
t.Fatal("the data do not match")
|
||||
}
|
||||
|
||||
testPaths(t, keys)
|
||||
}
|
||||
|
||||
// The default limit for the size of the value in Consul is 524288 bytes
|
||||
testPayload(
|
||||
t,
|
||||
map[string]string{
|
||||
"foo": strings.Repeat("a", 524288+2),
|
||||
},
|
||||
[]string{
|
||||
"tf-unit/test-large-state",
|
||||
"tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/0",
|
||||
"tf-unit/test-large-state/tfstate.2cb96f52c9fff8e0b56cb786ec4d2bed/1",
|
||||
},
|
||||
)
|
||||
|
||||
// We try to replace the payload with a small one, the old chunks should be removed
|
||||
testPayload(
|
||||
t,
|
||||
map[string]string{"var": "a"},
|
||||
[]string{"tf-unit/test-large-state"},
|
||||
)
|
||||
|
||||
// Test with gzip and chunks
|
||||
b = backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
"gzip": true,
|
||||
}))
|
||||
|
||||
s, err = b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lockPath := c.Path + lockSuffix
|
||||
c = s.(*remote.State).Client.(*RemoteClient)
|
||||
c.Path = path
|
||||
|
||||
if err := c.Unlock(id); err != nil {
|
||||
t.Fatal(err)
|
||||
// We need a long random string so it results in multiple chunks even after
|
||||
// being gziped
|
||||
|
||||
// We use a fixed seed so the test can be reproductible
|
||||
rand.Seed(1234)
|
||||
RandStringRunes := func(n int) string {
|
||||
var letterRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
|
||||
b := make([]rune, n)
|
||||
for i := range b {
|
||||
b[i] = letterRunes[rand.Intn(len(letterRunes))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// get the lock val
|
||||
pair, _, err := c.Client.KV().Get(lockPath, nil)
|
||||
testPayload(
|
||||
t,
|
||||
map[string]string{
|
||||
"bar": RandStringRunes(5 * (524288 + 2)),
|
||||
},
|
||||
[]string{
|
||||
"tf-unit/test-large-state",
|
||||
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/0",
|
||||
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/1",
|
||||
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/2",
|
||||
"tf-unit/test-large-state/tfstate.58e8160335864b520b1cc7f2222a4019/3",
|
||||
},
|
||||
)
|
||||
|
||||
// Deleting the state should remove all chunks
|
||||
err = c.Delete()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if pair != nil {
|
||||
t.Fatalf("lock key not cleaned up at: %s", pair.Key)
|
||||
testPaths(t, []string{})
|
||||
}
|
||||
|
||||
func TestConsul_stateLock(t *testing.T) {
|
||||
testCases := []string{
|
||||
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
||||
}
|
||||
|
||||
for _, path := range testCases {
|
||||
t.Run(path, func(*testing.T) {
|
||||
// create 2 instances to get 2 remote.Clients
|
||||
sA, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})).StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
sB, err := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
})).StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
remote.TestRemoteLocks(t, sA.(*remote.State).Client, sB.(*remote.State).Client)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestConsul_destroyLock(t *testing.T) {
|
||||
testCases := []string{
|
||||
fmt.Sprintf("tf-unit/%s", time.Now().String()),
|
||||
fmt.Sprintf("tf-unit/%s/", time.Now().String()),
|
||||
}
|
||||
|
||||
testLock := func(client *RemoteClient, lockPath string) {
|
||||
// get the lock val
|
||||
pair, _, err := client.Client.KV().Get(lockPath, nil)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if pair != nil {
|
||||
t.Fatalf("lock key not cleaned up at: %s", pair.Key)
|
||||
}
|
||||
}
|
||||
|
||||
for _, path := range testCases {
|
||||
t.Run(path, func(*testing.T) {
|
||||
// Get the backend
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(map[string]interface{}{
|
||||
"address": srv.HTTPAddr,
|
||||
"path": path,
|
||||
}))
|
||||
|
||||
// Grab the client
|
||||
s, err := b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
clientA := s.(*remote.State).Client.(*RemoteClient)
|
||||
|
||||
info := statemgr.NewLockInfo()
|
||||
id, err := clientA.Lock(info)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
lockPath := clientA.Path + lockSuffix
|
||||
|
||||
if err := clientA.Unlock(id); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testLock(clientA, lockPath)
|
||||
|
||||
// The release the lock from a second client to test the
|
||||
// `terraform force-unlock <lock_id>` functionnality
|
||||
s, err = b.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
t.Fatalf("err: %s", err)
|
||||
}
|
||||
|
||||
clientB := s.(*remote.State).Client.(*RemoteClient)
|
||||
|
||||
info = statemgr.NewLockInfo()
|
||||
id, err = clientA.Lock(info)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := clientB.Unlock(id); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
testLock(clientA, lockPath)
|
||||
|
||||
err = clientA.Unlock(id)
|
||||
|
||||
if err == nil {
|
||||
t.Fatal("consul lock should have been lost")
|
||||
}
|
||||
if err.Error() != "consul lock was lost" {
|
||||
t.Fatal("got wrong error", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -151,7 +349,7 @@ func TestConsul_lostLock(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
info := state.NewLockInfo()
|
||||
info := statemgr.NewLockInfo()
|
||||
info.Operation = "test-lost-lock"
|
||||
id, err := sA.Lock(info)
|
||||
if err != nil {
|
||||
|
@ -200,7 +398,7 @@ func TestConsul_lostLockConnection(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
info := state.NewLockInfo()
|
||||
info := statemgr.NewLockInfo()
|
||||
info.Operation = "test-lost-lock-connection"
|
||||
id, err := s.Lock(info)
|
||||
if err != nil {
|
||||
|
|
|
@ -0,0 +1,169 @@
|
|||
package cos
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common"
|
||||
"github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common/profile"
|
||||
tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813"
|
||||
"github.com/tencentyun/cos-go-sdk-v5"
|
||||
)
|
||||
|
||||
// Default value from environment variable
|
||||
const (
|
||||
PROVIDER_SECRET_ID = "TENCENTCLOUD_SECRET_ID"
|
||||
PROVIDER_SECRET_KEY = "TENCENTCLOUD_SECRET_KEY"
|
||||
PROVIDER_REGION = "TENCENTCLOUD_REGION"
|
||||
)
|
||||
|
||||
// Backend implements "backend".Backend for tencentCloud cos
|
||||
type Backend struct {
|
||||
*schema.Backend
|
||||
|
||||
cosContext context.Context
|
||||
cosClient *cos.Client
|
||||
tagClient *tag.Client
|
||||
|
||||
region string
|
||||
bucket string
|
||||
prefix string
|
||||
key string
|
||||
encrypt bool
|
||||
acl string
|
||||
}
|
||||
|
||||
// New creates a new backend for TencentCloud cos remote state.
|
||||
func New() backend.Backend {
|
||||
s := &schema.Backend{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"secret_id": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_ID, nil),
|
||||
Description: "Secret id of Tencent Cloud",
|
||||
},
|
||||
"secret_key": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(PROVIDER_SECRET_KEY, nil),
|
||||
Description: "Secret key of Tencent Cloud",
|
||||
Sensitive: true,
|
||||
},
|
||||
"region": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
DefaultFunc: schema.EnvDefaultFunc(PROVIDER_REGION, nil),
|
||||
Description: "The region of the COS bucket",
|
||||
InputDefault: "ap-guangzhou",
|
||||
},
|
||||
"bucket": {
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "The name of the COS bucket",
|
||||
},
|
||||
"prefix": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The directory for saving the state file in bucket",
|
||||
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
|
||||
prefix := v.(string)
|
||||
if strings.HasPrefix(prefix, "/") || strings.HasPrefix(prefix, "./") {
|
||||
return nil, []error{fmt.Errorf("prefix must not start with '/' or './'")}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
"key": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "The path for saving the state file in bucket",
|
||||
Default: "terraform.tfstate",
|
||||
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
|
||||
if strings.HasPrefix(v.(string), "/") || strings.HasSuffix(v.(string), "/") {
|
||||
return nil, []error{fmt.Errorf("key can not start and end with '/'")}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
"encrypt": {
|
||||
Type: schema.TypeBool,
|
||||
Optional: true,
|
||||
Description: "Whether to enable server side encryption of the state file",
|
||||
Default: true,
|
||||
},
|
||||
"acl": {
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
Description: "Object ACL to be applied to the state file",
|
||||
Default: "private",
|
||||
ValidateFunc: func(v interface{}, s string) ([]string, []error) {
|
||||
value := v.(string)
|
||||
if value != "private" && value != "public-read" {
|
||||
return nil, []error{fmt.Errorf(
|
||||
"acl value invalid, expected %s or %s, got %s",
|
||||
"private", "public-read", value)}
|
||||
}
|
||||
return nil, nil
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := &Backend{Backend: s}
|
||||
result.Backend.ConfigureFunc = result.configure
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// configure init cos client
|
||||
func (b *Backend) configure(ctx context.Context) error {
|
||||
if b.cosClient != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
b.cosContext = ctx
|
||||
data := schema.FromContextBackendConfig(b.cosContext)
|
||||
|
||||
b.region = data.Get("region").(string)
|
||||
b.bucket = data.Get("bucket").(string)
|
||||
b.prefix = data.Get("prefix").(string)
|
||||
b.key = data.Get("key").(string)
|
||||
b.encrypt = data.Get("encrypt").(bool)
|
||||
b.acl = data.Get("acl").(string)
|
||||
|
||||
u, err := url.Parse(fmt.Sprintf("https://%s.cos.%s.myqcloud.com", b.bucket, b.region))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b.cosClient = cos.NewClient(
|
||||
&cos.BaseURL{BucketURL: u},
|
||||
&http.Client{
|
||||
Timeout: 60 * time.Second,
|
||||
Transport: &cos.AuthorizationTransport{
|
||||
SecretID: data.Get("secret_id").(string),
|
||||
SecretKey: data.Get("secret_key").(string),
|
||||
},
|
||||
},
|
||||
)
|
||||
|
||||
credential := common.NewCredential(
|
||||
data.Get("secret_id").(string),
|
||||
data.Get("secret_key").(string),
|
||||
)
|
||||
|
||||
cpf := profile.NewClientProfile()
|
||||
cpf.HttpProfile.ReqMethod = "POST"
|
||||
cpf.HttpProfile.ReqTimeout = 300
|
||||
cpf.Language = "en-US"
|
||||
b.tagClient, err = tag.NewClient(credential, b.region, cpf)
|
||||
|
||||
return err
|
||||
}
|
|
@ -0,0 +1,178 @@
|
|||
package cos
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
"github.com/likexian/gokit/assert"
|
||||
)
|
||||
|
||||
// Define file suffix
|
||||
const (
|
||||
stateFileSuffix = ".tfstate"
|
||||
lockFileSuffix = ".tflock"
|
||||
)
|
||||
|
||||
// Workspaces returns a list of names for the workspaces
|
||||
func (b *Backend) Workspaces() ([]string, error) {
|
||||
c, err := b.client("tencentcloud")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
obs, err := c.getBucket(b.prefix)
|
||||
log.Printf("[DEBUG] list all workspaces, objects: %v, error: %v", obs, err)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ws := []string{backend.DefaultStateName}
|
||||
for _, vv := range obs {
|
||||
// <name>.tfstate
|
||||
if !strings.HasSuffix(vv.Key, stateFileSuffix) {
|
||||
continue
|
||||
}
|
||||
// default worksapce
|
||||
if path.Join(b.prefix, b.key) == vv.Key {
|
||||
continue
|
||||
}
|
||||
// <prefix>/<worksapce>/<key>
|
||||
prefix := strings.TrimRight(b.prefix, "/") + "/"
|
||||
parts := strings.Split(strings.TrimPrefix(vv.Key, prefix), "/")
|
||||
if len(parts) > 0 && parts[0] != "" {
|
||||
ws = append(ws, parts[0])
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(ws[1:])
|
||||
log.Printf("[DEBUG] list all workspaces, workspaces: %v", ws)
|
||||
|
||||
return ws, nil
|
||||
}
|
||||
|
||||
// DeleteWorkspace deletes the named workspaces. The "default" state cannot be deleted.
|
||||
func (b *Backend) DeleteWorkspace(name string) error {
|
||||
log.Printf("[DEBUG] delete workspace, workspace: %v", name)
|
||||
|
||||
if name == backend.DefaultStateName || name == "" {
|
||||
return fmt.Errorf("default state is not allow to delete")
|
||||
}
|
||||
|
||||
c, err := b.client(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return c.Delete()
|
||||
}
|
||||
|
||||
// StateMgr manage the state, if the named state not exists, a new file will created
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
log.Printf("[DEBUG] state manager, current workspace: %v", name)
|
||||
|
||||
c, err := b.client(name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
stateMgr := &remote.State{Client: c}
|
||||
|
||||
ws, err := b.Workspaces()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !assert.IsContains(ws, name) {
|
||||
log.Printf("[DEBUG] workspace %v not exists", name)
|
||||
|
||||
// take a lock on this state while we write it
|
||||
lockInfo := statemgr.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := c.Lock(lockInfo)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to lock cos state: %s", err)
|
||||
}
|
||||
|
||||
// Local helper function so we can call it multiple places
|
||||
lockUnlock := func(e error) error {
|
||||
if err := stateMgr.Unlock(lockId); err != nil {
|
||||
return fmt.Errorf(unlockErrMsg, err, lockId)
|
||||
}
|
||||
return e
|
||||
}
|
||||
|
||||
// Grab the value
|
||||
if err := stateMgr.RefreshState(); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// If we have no state, we have to create an empty state
|
||||
if v := stateMgr.State(); v == nil {
|
||||
if err := stateMgr.WriteState(states.NewState()); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
if err := stateMgr.PersistState(); err != nil {
|
||||
err = lockUnlock(err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// Unlock, the state should now be initialized
|
||||
if err := lockUnlock(nil); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return stateMgr, nil
|
||||
}
|
||||
|
||||
// client returns a remoteClient for the named state.
|
||||
func (b *Backend) client(name string) (*remoteClient, error) {
|
||||
if strings.TrimSpace(name) == "" {
|
||||
return nil, fmt.Errorf("state name not allow to be empty")
|
||||
}
|
||||
|
||||
return &remoteClient{
|
||||
cosContext: b.cosContext,
|
||||
cosClient: b.cosClient,
|
||||
tagClient: b.tagClient,
|
||||
bucket: b.bucket,
|
||||
stateFile: b.stateFile(name),
|
||||
lockFile: b.lockFile(name),
|
||||
encrypt: b.encrypt,
|
||||
acl: b.acl,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// stateFile returns state file path by name
|
||||
func (b *Backend) stateFile(name string) string {
|
||||
if name == backend.DefaultStateName {
|
||||
return path.Join(b.prefix, b.key)
|
||||
}
|
||||
return path.Join(b.prefix, name, b.key)
|
||||
}
|
||||
|
||||
// lockFile returns lock file path by name
|
||||
func (b *Backend) lockFile(name string) string {
|
||||
return b.stateFile(name) + lockFileSuffix
|
||||
}
|
||||
|
||||
// unlockErrMsg is error msg for unlock failed
|
||||
const unlockErrMsg = `
|
||||
Unlocking the state file on TencentCloud cos backend failed:
|
||||
|
||||
Error message: %v
|
||||
Lock ID (gen): %s
|
||||
|
||||
You may have to force-unlock this state in order to use it again.
|
||||
The TencentCloud backend acquires a lock during initialization
|
||||
to ensure the initial state file is created.
|
||||
`
|
|
@ -0,0 +1,227 @@
|
|||
package cos
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/likexian/gokit/assert"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultPrefix = ""
|
||||
defaultKey = "terraform.tfstate"
|
||||
)
|
||||
|
||||
// Testing Thanks to GCS
|
||||
|
||||
func TestStateFile(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
cases := []struct {
|
||||
prefix string
|
||||
stateName string
|
||||
key string
|
||||
wantStateFile string
|
||||
wantLockFile string
|
||||
}{
|
||||
{"", "default", "default.tfstate", "default.tfstate", "default.tfstate.tflock"},
|
||||
{"", "default", "test.tfstate", "test.tfstate", "test.tfstate.tflock"},
|
||||
{"", "dev", "test.tfstate", "dev/test.tfstate", "dev/test.tfstate.tflock"},
|
||||
{"terraform/test", "default", "default.tfstate", "terraform/test/default.tfstate", "terraform/test/default.tfstate.tflock"},
|
||||
{"terraform/test", "default", "test.tfstate", "terraform/test/test.tfstate", "terraform/test/test.tfstate.tflock"},
|
||||
{"terraform/test", "dev", "test.tfstate", "terraform/test/dev/test.tfstate", "terraform/test/dev/test.tfstate.tflock"},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
b := &Backend{
|
||||
prefix: c.prefix,
|
||||
key: c.key,
|
||||
}
|
||||
assert.Equal(t, b.stateFile(c.stateName), c.wantStateFile)
|
||||
assert.Equal(t, b.lockFile(c.stateName), c.wantLockFile)
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemoteClient(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bucket := bucketName(t)
|
||||
|
||||
be := setupBackend(t, bucket, defaultPrefix, defaultKey, false)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
ss, err := be.StateMgr(backend.DefaultStateName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
assert.True(t, ok)
|
||||
|
||||
remote.TestClient(t, rs.Client)
|
||||
}
|
||||
|
||||
func TestRemoteClientWithPrefix(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
prefix := "prefix/test"
|
||||
bucket := bucketName(t)
|
||||
|
||||
be := setupBackend(t, bucket, prefix, defaultKey, false)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
ss, err := be.StateMgr(backend.DefaultStateName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
assert.True(t, ok)
|
||||
|
||||
remote.TestClient(t, rs.Client)
|
||||
}
|
||||
|
||||
func TestRemoteClientWithEncryption(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bucket := bucketName(t)
|
||||
|
||||
be := setupBackend(t, bucket, defaultPrefix, defaultKey, true)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
ss, err := be.StateMgr(backend.DefaultStateName)
|
||||
assert.Nil(t, err)
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
assert.True(t, ok)
|
||||
|
||||
remote.TestClient(t, rs.Client)
|
||||
}
|
||||
|
||||
func TestRemoteLocks(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bucket := bucketName(t)
|
||||
|
||||
be := setupBackend(t, bucket, defaultPrefix, defaultKey, false)
|
||||
defer teardownBackend(t, be)
|
||||
|
||||
remoteClient := func() (remote.Client, error) {
|
||||
ss, err := be.StateMgr(backend.DefaultStateName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rs, ok := ss.(*remote.State)
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("be.StateMgr(): got a %T, want a *remote.State", ss)
|
||||
}
|
||||
|
||||
return rs.Client, nil
|
||||
}
|
||||
|
||||
c0, err := remoteClient()
|
||||
assert.Nil(t, err)
|
||||
|
||||
c1, err := remoteClient()
|
||||
assert.Nil(t, err)
|
||||
|
||||
remote.TestRemoteLocks(t, c0, c1)
|
||||
}
|
||||
|
||||
func TestBackend(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bucket := bucketName(t)
|
||||
|
||||
be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, false)
|
||||
defer teardownBackend(t, be0)
|
||||
|
||||
be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, false)
|
||||
defer teardownBackend(t, be1)
|
||||
|
||||
backend.TestBackendStates(t, be0)
|
||||
backend.TestBackendStateLocks(t, be0, be1)
|
||||
backend.TestBackendStateForceUnlock(t, be0, be1)
|
||||
}
|
||||
|
||||
func TestBackendWithPrefix(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
prefix := "prefix/test"
|
||||
bucket := bucketName(t)
|
||||
|
||||
be0 := setupBackend(t, bucket, prefix, defaultKey, false)
|
||||
defer teardownBackend(t, be0)
|
||||
|
||||
be1 := setupBackend(t, bucket, prefix+"/", defaultKey, false)
|
||||
defer teardownBackend(t, be1)
|
||||
|
||||
backend.TestBackendStates(t, be0)
|
||||
backend.TestBackendStateLocks(t, be0, be1)
|
||||
}
|
||||
|
||||
func TestBackendWithEncryption(t *testing.T) {
|
||||
t.Parallel()
|
||||
|
||||
bucket := bucketName(t)
|
||||
|
||||
be0 := setupBackend(t, bucket, defaultPrefix, defaultKey, true)
|
||||
defer teardownBackend(t, be0)
|
||||
|
||||
be1 := setupBackend(t, bucket, defaultPrefix, defaultKey, true)
|
||||
defer teardownBackend(t, be1)
|
||||
|
||||
backend.TestBackendStates(t, be0)
|
||||
backend.TestBackendStateLocks(t, be0, be1)
|
||||
}
|
||||
|
||||
func setupBackend(t *testing.T, bucket, prefix, key string, encrypt bool) backend.Backend {
|
||||
t.Helper()
|
||||
|
||||
skip := os.Getenv("TF_COS_APPID") == ""
|
||||
if skip {
|
||||
t.Skip("This test require setting TF_COS_APPID environment variables")
|
||||
}
|
||||
|
||||
if os.Getenv(PROVIDER_REGION) == "" {
|
||||
os.Setenv(PROVIDER_REGION, "ap-guangzhou")
|
||||
}
|
||||
|
||||
appId := os.Getenv("TF_COS_APPID")
|
||||
region := os.Getenv(PROVIDER_REGION)
|
||||
|
||||
config := map[string]interface{}{
|
||||
"region": region,
|
||||
"bucket": bucket + appId,
|
||||
"prefix": prefix,
|
||||
"key": key,
|
||||
}
|
||||
|
||||
b := backend.TestBackendConfig(t, New(), backend.TestWrapConfig(config))
|
||||
be := b.(*Backend)
|
||||
|
||||
c, err := be.client("tencentcloud")
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = c.putBucket()
|
||||
assert.Nil(t, err)
|
||||
|
||||
return b
|
||||
}
|
||||
|
||||
func teardownBackend(t *testing.T, b backend.Backend) {
|
||||
t.Helper()
|
||||
|
||||
c, err := b.(*Backend).client("tencentcloud")
|
||||
assert.Nil(t, err)
|
||||
|
||||
err = c.deleteBucket(true)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
|
||||
// bucketName derives a per-test bucket name stem of the form
// "terraform-test-<10 hex chars>-"; the trailing dash separates the stem
// from the COS app id appended by setupBackend.
func bucketName(t *testing.T) string {
	unique := fmt.Sprintf("%s-%x", t.Name(), time.Now().UnixNano())
	digest := fmt.Sprintf("%x", md5.Sum([]byte(unique)))
	return "terraform-test-" + digest[:10] + "-"
}
|
|
@ -0,0 +1,403 @@
|
|||
package cos
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
multierror "github.com/hashicorp/go-multierror"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
tag "github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/tag/v20180813"
|
||||
"github.com/tencentyun/cos-go-sdk-v5"
|
||||
)
|
||||
|
||||
const (
	// lockTagKey is the TencentCloud tag key used as a short-lived global
	// mutex while the lock file is created or removed (see cosLock/cosUnlock).
	lockTagKey = "tencentcloud-terraform-lock"
)

// remoteClient implements remote state storage on TencentCloud COS.
// State is stored as an object, and writes are serialized via a lock
// file plus a tag-based mutex.
type remoteClient struct {
	cosContext context.Context // context used for all COS and tag API calls
	cosClient  *cos.Client     // COS object/bucket operations
	tagClient  *tag.Client     // tag API client, used only for the lock tag

	bucket    string // COS bucket name
	stateFile string // object key of the state file
	lockFile  string // object key of the lock file
	encrypt   bool   // enable server-side encryption (AES256) on writes
	acl       string // ACL applied to written objects
}
|
||||
|
||||
// Get returns remote state file
|
||||
func (c *remoteClient) Get() (*remote.Payload, error) {
|
||||
log.Printf("[DEBUG] get remote state file %s", c.stateFile)
|
||||
|
||||
exists, data, checksum, err := c.getObject(c.stateFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
payload := &remote.Payload{
|
||||
Data: data,
|
||||
MD5: []byte(checksum),
|
||||
}
|
||||
|
||||
return payload, nil
|
||||
}
|
||||
|
||||
// Put put state file to remote
|
||||
func (c *remoteClient) Put(data []byte) error {
|
||||
log.Printf("[DEBUG] put remote state file %s", c.stateFile)
|
||||
|
||||
return c.putObject(c.stateFile, data)
|
||||
}
|
||||
|
||||
// Delete delete remote state file
|
||||
func (c *remoteClient) Delete() error {
|
||||
log.Printf("[DEBUG] delete remote state file %s", c.stateFile)
|
||||
|
||||
return c.deleteObject(c.stateFile)
|
||||
}
|
||||
|
||||
// Lock acquires the state lock. It first takes a short-lived global
// mutex (a TencentCloud tag, see cosLock) to serialize lock-file
// creation, then writes a lock file containing the serialized LockInfo.
// The returned lock ID is the MD5 checksum of the lock file's contents,
// which Unlock later verifies before removing the file.
func (c *remoteClient) Lock(info *statemgr.LockInfo) (string, error) {
	log.Printf("[DEBUG] lock remote state file %s", c.lockFile)

	// Take the tag-based mutex; it is released when this function
	// returns — only the lock file itself persists as the lock.
	err := c.cosLock(c.bucket, c.lockFile)
	if err != nil {
		return "", c.lockError(err)
	}
	defer c.cosUnlock(c.bucket, c.lockFile)

	// A pre-existing lock file means another caller holds the lock.
	exists, _, _, err := c.getObject(c.lockFile)
	if err != nil {
		return "", c.lockError(err)
	}

	if exists {
		return "", c.lockError(fmt.Errorf("lock file %s exists", c.lockFile))
	}

	info.Path = c.lockFile
	data, err := json.Marshal(info)
	if err != nil {
		return "", c.lockError(err)
	}

	// The lock ID is the checksum of the serialized lock info; putObject
	// also stores it in the object's X-Cos-Meta-Md5 metadata.
	check := fmt.Sprintf("%x", md5.Sum(data))
	err = c.putObject(c.lockFile, data)
	if err != nil {
		return "", c.lockError(err)
	}

	return check, nil
}
|
||||
|
||||
// Unlock unlock remote state file
|
||||
func (c *remoteClient) Unlock(check string) error {
|
||||
log.Printf("[DEBUG] unlock remote state file %s", c.lockFile)
|
||||
|
||||
info, err := c.lockInfo()
|
||||
if err != nil {
|
||||
return c.lockError(err)
|
||||
}
|
||||
|
||||
if info.ID != check {
|
||||
return c.lockError(fmt.Errorf("lock id mismatch, %v != %v", info.ID, check))
|
||||
}
|
||||
|
||||
err = c.deleteObject(c.lockFile)
|
||||
if err != nil {
|
||||
return c.lockError(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// lockError returns statemgr.LockError
|
||||
func (c *remoteClient) lockError(err error) *statemgr.LockError {
|
||||
log.Printf("[DEBUG] failed to lock or unlock %s: %v", c.lockFile, err)
|
||||
|
||||
lockErr := &statemgr.LockError{
|
||||
Err: err,
|
||||
}
|
||||
|
||||
info, infoErr := c.lockInfo()
|
||||
if infoErr != nil {
|
||||
lockErr.Err = multierror.Append(lockErr.Err, infoErr)
|
||||
} else {
|
||||
lockErr.Info = info
|
||||
}
|
||||
|
||||
return lockErr
|
||||
}
|
||||
|
||||
// lockInfo returns LockInfo from lock file
|
||||
func (c *remoteClient) lockInfo() (*statemgr.LockInfo, error) {
|
||||
exists, data, checksum, err := c.getObject(c.lockFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("lock file %s not exists", c.lockFile)
|
||||
}
|
||||
|
||||
info := &statemgr.LockInfo{}
|
||||
if err := json.Unmarshal(data, info); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
info.ID = checksum
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// getObject downloads cosFile from the bucket. On success it returns
// exists=true, the object's bytes, and the MD5 checksum recorded in the
// object's X-Cos-Meta-Md5 metadata (verified against the downloaded
// bytes). A 404 response yields exists=false with a nil error.
func (c *remoteClient) getObject(cosFile string) (exists bool, data []byte, checksum string, err error) {
	rsp, err := c.cosClient.Object.Get(c.cosContext, cosFile, nil)
	// A nil response means the request never completed (e.g. network
	// failure) — there is no body to inspect or close.
	if rsp == nil {
		log.Printf("[DEBUG] getObject %s: error: %v", cosFile, err)
		err = fmt.Errorf("failed to open file at %v: %v", cosFile, err)
		return
	}
	defer rsp.Body.Close()

	log.Printf("[DEBUG] getObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err)
	if err != nil {
		// 404 is "not found", reported as exists=false with no error.
		if rsp.StatusCode == 404 {
			err = nil
		} else {
			err = fmt.Errorf("failed to open file at %v: %v", cosFile, err)
		}
		return
	}

	// The checksum is stored by putObject as custom object metadata; a
	// valid MD5 hex digest is exactly 32 characters.
	checksum = rsp.Header.Get("X-Cos-Meta-Md5")
	log.Printf("[DEBUG] getObject %s: checksum: %s", cosFile, checksum)
	if len(checksum) != 32 {
		err = fmt.Errorf("failed to open file at %v: checksum %s invalid", cosFile, checksum)
		return
	}

	exists = true
	data, err = ioutil.ReadAll(rsp.Body)
	log.Printf("[DEBUG] getObject %s: data length: %d", cosFile, len(data))
	if err != nil {
		err = fmt.Errorf("failed to open file at %v: %v", cosFile, err)
		return
	}

	// Verify the downloaded bytes against the stored checksum to detect
	// corruption or concurrent modification.
	check := fmt.Sprintf("%x", md5.Sum(data))
	log.Printf("[DEBUG] getObject %s: check: %s", cosFile, check)
	if check != checksum {
		err = fmt.Errorf("failed to open file at %v: checksum mismatch, %s != %s", cosFile, check, checksum)
		return
	}

	return
}
|
||||
|
||||
// putObject uploads data to cosFile, recording the data's MD5 digest in
// the object's X-Cos-Meta-Md5 metadata (read back by getObject for
// integrity checking) and applying the configured ACL. Server-side
// AES256 encryption is requested when c.encrypt is set.
func (c *remoteClient) putObject(cosFile string, data []byte) error {
	opt := &cos.ObjectPutOptions{
		ObjectPutHeaderOptions: &cos.ObjectPutHeaderOptions{
			XCosMetaXXX: &http.Header{
				"X-Cos-Meta-Md5": []string{fmt.Sprintf("%x", md5.Sum(data))},
			},
		},
		ACLHeaderOptions: &cos.ACLHeaderOptions{
			XCosACL: c.acl,
		},
	}

	if c.encrypt {
		opt.ObjectPutHeaderOptions.XCosServerSideEncryption = "AES256"
	}

	r := bytes.NewReader(data)
	rsp, err := c.cosClient.Object.Put(c.cosContext, cosFile, r, opt)
	// A nil response means the request never completed — nothing to close.
	if rsp == nil {
		log.Printf("[DEBUG] putObject %s: error: %v", cosFile, err)
		return fmt.Errorf("failed to save file to %v: %v", cosFile, err)
	}
	defer rsp.Body.Close()

	log.Printf("[DEBUG] putObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err)
	if err != nil {
		return fmt.Errorf("failed to save file to %v: %v", cosFile, err)
	}

	return nil
}
|
||||
|
||||
// deleteObject delete remote object
|
||||
func (c *remoteClient) deleteObject(cosFile string) error {
|
||||
rsp, err := c.cosClient.Object.Delete(c.cosContext, cosFile)
|
||||
if rsp == nil {
|
||||
log.Printf("[DEBUG] deleteObject %s: error: %v", cosFile, err)
|
||||
return fmt.Errorf("failed to delete file %v: %v", cosFile, err)
|
||||
}
|
||||
defer rsp.Body.Close()
|
||||
|
||||
log.Printf("[DEBUG] deleteObject %s: code: %d, error: %v", cosFile, rsp.StatusCode, err)
|
||||
if rsp.StatusCode == 404 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete file %v: %v", cosFile, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// getBucket lists the objects in the bucket whose keys start with
// prefix. A nil response or a 404 status is reported as "bucket not
// exists".
func (c *remoteClient) getBucket(prefix string) (obs []cos.Object, err error) {
	fs, rsp, err := c.cosClient.Bucket.Get(c.cosContext, &cos.BucketGetOptions{Prefix: prefix})
	// A nil response means the request never completed — nothing to close.
	if rsp == nil {
		log.Printf("[DEBUG] getBucket %s/%s: error: %v", c.bucket, prefix, err)
		err = fmt.Errorf("bucket %s not exists", c.bucket)
		return
	}
	defer rsp.Body.Close()

	log.Printf("[DEBUG] getBucket %s/%s: code: %d, error: %v", c.bucket, prefix, rsp.StatusCode, err)
	if rsp.StatusCode == 404 {
		err = fmt.Errorf("bucket %s not exists", c.bucket)
		return
	}

	if err != nil {
		return
	}

	return fs.Contents, nil
}
|
||||
|
||||
// putBucket create cos bucket
|
||||
func (c *remoteClient) putBucket() error {
|
||||
rsp, err := c.cosClient.Bucket.Put(c.cosContext, nil)
|
||||
if rsp == nil {
|
||||
log.Printf("[DEBUG] putBucket %s: error: %v", c.bucket, err)
|
||||
return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err)
|
||||
}
|
||||
defer rsp.Body.Close()
|
||||
|
||||
log.Printf("[DEBUG] putBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err)
|
||||
if rsp.StatusCode == 409 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create bucket %v: %v", c.bucket, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteBucket delete cos bucket
|
||||
func (c *remoteClient) deleteBucket(recursive bool) error {
|
||||
if recursive {
|
||||
obs, err := c.getBucket("")
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not exists") {
|
||||
return nil
|
||||
}
|
||||
log.Printf("[DEBUG] deleteBucket %s: empty bucket error: %v", c.bucket, err)
|
||||
return fmt.Errorf("failed to empty bucket %v: %v", c.bucket, err)
|
||||
}
|
||||
for _, v := range obs {
|
||||
c.deleteObject(v.Key)
|
||||
}
|
||||
}
|
||||
|
||||
rsp, err := c.cosClient.Bucket.Delete(c.cosContext)
|
||||
if rsp == nil {
|
||||
log.Printf("[DEBUG] deleteBucket %s: error: %v", c.bucket, err)
|
||||
return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err)
|
||||
}
|
||||
defer rsp.Body.Close()
|
||||
|
||||
log.Printf("[DEBUG] deleteBucket %s: code: %d, error: %v", c.bucket, rsp.StatusCode, err)
|
||||
if rsp.StatusCode == 404 {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete bucket %v: %v", c.bucket, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// cosLock lock cos for writing
|
||||
func (c *remoteClient) cosLock(bucket, cosFile string) error {
|
||||
log.Printf("[DEBUG] lock cos file %s:%s", bucket, cosFile)
|
||||
|
||||
cosPath := fmt.Sprintf("%s:%s", bucket, cosFile)
|
||||
lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath)))
|
||||
|
||||
return c.CreateTag(lockTagKey, lockTagValue)
|
||||
}
|
||||
|
||||
// cosUnlock unlock cos writing
|
||||
func (c *remoteClient) cosUnlock(bucket, cosFile string) error {
|
||||
log.Printf("[DEBUG] unlock cos file %s:%s", bucket, cosFile)
|
||||
|
||||
cosPath := fmt.Sprintf("%s:%s", bucket, cosFile)
|
||||
lockTagValue := fmt.Sprintf("%x", md5.Sum([]byte(cosPath)))
|
||||
|
||||
var err error
|
||||
for i := 0; i < 30; i++ {
|
||||
err = c.DeleteTag(lockTagKey, lockTagValue)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// CreateTag create tag by key and value
|
||||
func (c *remoteClient) CreateTag(key, value string) error {
|
||||
request := tag.NewCreateTagRequest()
|
||||
request.TagKey = &key
|
||||
request.TagValue = &value
|
||||
|
||||
_, err := c.tagClient.CreateTag(request)
|
||||
log.Printf("[DEBUG] create tag %s:%s: error: %v", key, value, err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create tag: %s -> %s: %s", key, value, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeleteTag create tag by key and value
|
||||
func (c *remoteClient) DeleteTag(key, value string) error {
|
||||
request := tag.NewDeleteTagRequest()
|
||||
request.TagKey = &key
|
||||
request.TagValue = &value
|
||||
|
||||
_, err := c.tagClient.DeleteTag(request)
|
||||
log.Printf("[DEBUG] delete tag %s:%s: error: %v", key, value, err)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to delete tag: %s -> %s: %s", key, value, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
|
@ -9,8 +9,8 @@ import (
|
|||
etcdapi "github.com/coreos/etcd/client"
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
func New() backend.Backend {
|
||||
|
@ -24,7 +24,7 @@ func New() backend.Backend {
|
|||
"endpoints": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
Description: "A space-separated list of the etcd endpoints<Paste>",
|
||||
Description: "A space-separated list of the etcd endpoints",
|
||||
},
|
||||
"username": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
|
@ -83,7 +83,7 @@ func (b *Backend) DeleteWorkspace(string) error {
|
|||
return backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (state.State, error) {
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
if name != backend.DefaultStateName {
|
||||
return nil, backend.ErrWorkspacesNotSupported
|
||||
}
|
||||
|
|
|
@ -6,7 +6,7 @@ import (
|
|||
"fmt"
|
||||
|
||||
etcdapi "github.com/coreos/etcd/client"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
)
|
||||
|
||||
// EtcdClient is a remote client that stores data in etcd.
|
||||
|
|
|
@ -8,7 +8,7 @@ import (
|
|||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/configs"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
|
|
|
@ -9,9 +9,9 @@ import (
|
|||
etcdv3 "github.com/coreos/etcd/clientv3"
|
||||
|
||||
"github.com/hashicorp/terraform/backend"
|
||||
"github.com/hashicorp/terraform/state"
|
||||
"github.com/hashicorp/terraform/state/remote"
|
||||
"github.com/hashicorp/terraform/states"
|
||||
"github.com/hashicorp/terraform/states/remote"
|
||||
"github.com/hashicorp/terraform/states/statemgr"
|
||||
)
|
||||
|
||||
func (b *Backend) Workspaces() ([]string, error) {
|
||||
|
@ -41,8 +41,8 @@ func (b *Backend) DeleteWorkspace(name string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
func (b *Backend) StateMgr(name string) (state.State, error) {
|
||||
var stateMgr state.State = &remote.State{
|
||||
func (b *Backend) StateMgr(name string) (statemgr.Full, error) {
|
||||
var stateMgr statemgr.Full = &remote.State{
|
||||
Client: &RemoteClient{
|
||||
Client: b.client,
|
||||
DoLock: b.lock,
|
||||
|
@ -51,10 +51,10 @@ func (b *Backend) StateMgr(name string) (state.State, error) {
|
|||
}
|
||||
|
||||
if !b.lock {
|
||||
stateMgr = &state.LockDisabled{Inner: stateMgr}
|
||||
stateMgr = &statemgr.LockDisabled{Inner: stateMgr}
|
||||
}
|
||||
|
||||
lockInfo := state.NewLockInfo()
|
||||
lockInfo := statemgr.NewLockInfo()
|
||||
lockInfo.Operation = "init"
|
||||
lockId, err := stateMgr.Lock(lockInfo)
|
||||
if err != nil {
|
||||
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue