diff --git a/website/Gemfile b/website/Gemfile
deleted file mode 100644
index d1a2ed44a..000000000
--- a/website/Gemfile
+++ /dev/null
@@ -1,3 +0,0 @@
-source "https://rubygems.org"
-
-gem "middleman-hashicorp", "0.3.26"
diff --git a/website/Gemfile.lock b/website/Gemfile.lock
deleted file mode 100644
index a55343642..000000000
--- a/website/Gemfile.lock
+++ /dev/null
@@ -1,159 +0,0 @@
-GEM
- remote: https://rubygems.org/
- specs:
- activesupport (4.2.8)
- i18n (~> 0.7)
- minitest (~> 5.1)
- thread_safe (~> 0.3, >= 0.3.4)
- tzinfo (~> 1.1)
- autoprefixer-rails (7.1.1)
- execjs
- bootstrap-sass (3.3.7)
- autoprefixer-rails (>= 5.2.1)
- sass (>= 3.3.4)
- builder (3.2.3)
- capybara (2.4.4)
- mime-types (>= 1.16)
- nokogiri (>= 1.3.3)
- rack (>= 1.0.0)
- rack-test (>= 0.5.4)
- xpath (~> 2.0)
- chunky_png (1.3.8)
- coffee-script (2.4.1)
- coffee-script-source
- execjs
- coffee-script-source (1.12.2)
- compass (1.0.3)
- chunky_png (~> 1.2)
- compass-core (~> 1.0.2)
- compass-import-once (~> 1.0.5)
- rb-fsevent (>= 0.9.3)
- rb-inotify (>= 0.9)
- sass (>= 3.3.13, < 3.5)
- compass-core (1.0.3)
- multi_json (~> 1.0)
- sass (>= 3.3.0, < 3.5)
- compass-import-once (1.0.5)
- sass (>= 3.2, < 3.5)
- em-websocket (0.5.1)
- eventmachine (>= 0.12.9)
- http_parser.rb (~> 0.6.0)
- erubis (2.7.0)
- eventmachine (1.2.3)
- execjs (2.7.0)
- ffi (1.9.18)
- haml (5.0.1)
- temple (>= 0.8.0)
- tilt
- hike (1.2.3)
- hooks (0.4.1)
- uber (~> 0.0.14)
- http_parser.rb (0.6.0)
- i18n (0.7.0)
- json (2.1.0)
- kramdown (1.13.2)
- listen (3.0.8)
- rb-fsevent (~> 0.9, >= 0.9.4)
- rb-inotify (~> 0.9, >= 0.9.7)
- middleman (3.4.1)
- coffee-script (~> 2.2)
- compass (>= 1.0.0, < 2.0.0)
- compass-import-once (= 1.0.5)
- execjs (~> 2.0)
- haml (>= 4.0.5)
- kramdown (~> 1.2)
- middleman-core (= 3.4.1)
- middleman-sprockets (>= 3.1.2)
- sass (>= 3.4.0, < 4.0)
- uglifier (~> 2.5)
- middleman-core (3.4.1)
- activesupport (~> 4.1)
- bundler (~> 1.1)
- capybara (~> 2.4.4)
- erubis
- hooks (~> 0.3)
- i18n (~> 0.7.0)
- listen (~> 3.0.3)
- padrino-helpers (~> 0.12.3)
- rack (>= 1.4.5, < 2.0)
- thor (>= 0.15.2, < 2.0)
- tilt (~> 1.4.1, < 2.0)
- middleman-hashicorp (0.3.26)
- bootstrap-sass (~> 3.3)
- builder (~> 3.2)
- middleman (~> 3.4)
- middleman-livereload (~> 3.4)
- middleman-syntax (~> 3.0)
- redcarpet (~> 3.3)
- turbolinks (~> 5.0)
- middleman-livereload (3.4.6)
- em-websocket (~> 0.5.1)
- middleman-core (>= 3.3)
- rack-livereload (~> 0.3.15)
- middleman-sprockets (3.5.0)
- middleman-core (>= 3.3)
- sprockets (~> 2.12.1)
- sprockets-helpers (~> 1.1.0)
- sprockets-sass (~> 1.3.0)
- middleman-syntax (3.0.0)
- middleman-core (>= 3.2)
- rouge (~> 2.0)
- mime-types (3.1)
- mime-types-data (~> 3.2015)
- mime-types-data (3.2016.0521)
- mini_portile2 (2.2.0)
- minitest (5.10.2)
- multi_json (1.12.1)
- nokogiri (1.8.0)
- mini_portile2 (~> 2.2.0)
- padrino-helpers (0.12.8.1)
- i18n (~> 0.6, >= 0.6.7)
- padrino-support (= 0.12.8.1)
- tilt (~> 1.4.1)
- padrino-support (0.12.8.1)
- activesupport (>= 3.1)
- rack (1.6.8)
- rack-livereload (0.3.16)
- rack
- rack-test (0.6.3)
- rack (>= 1.0)
- rb-fsevent (0.9.8)
- rb-inotify (0.9.8)
- ffi (>= 0.5.0)
- redcarpet (3.4.0)
- rouge (2.0.7)
- sass (3.4.24)
- sprockets (2.12.4)
- hike (~> 1.2)
- multi_json (~> 1.0)
- rack (~> 1.0)
- tilt (~> 1.1, != 1.3.0)
- sprockets-helpers (1.1.0)
- sprockets (~> 2.0)
- sprockets-sass (1.3.1)
- sprockets (~> 2.0)
- tilt (~> 1.1)
- temple (0.8.0)
- thor (0.19.4)
- thread_safe (0.3.6)
- tilt (1.4.1)
- turbolinks (5.0.1)
- turbolinks-source (~> 5)
- turbolinks-source (5.0.3)
- tzinfo (1.2.3)
- thread_safe (~> 0.1)
- uber (0.0.15)
- uglifier (2.7.2)
- execjs (>= 0.3.0)
- json (>= 1.8.0)
- xpath (2.1.0)
- nokogiri (~> 1.3)
-
-PLATFORMS
- ruby
-
-DEPENDENCIES
- middleman-hashicorp (= 0.3.26)
-
-BUNDLED WITH
- 1.15.1
diff --git a/website/LICENSE.md b/website/LICENSE.md
deleted file mode 100644
index 3189f43a6..000000000
--- a/website/LICENSE.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Proprietary License
-
-This license is temporary while a more official one is drafted. However,
-this should make it clear:
-
-The text contents of this website are MPL 2.0 licensed.
-
-The design contents of this website are proprietary and may not be reproduced
-or reused in any way other than to run the website locally. The license for
-the design is owned solely by HashiCorp, Inc.
diff --git a/website/Makefile b/website/Makefile
deleted file mode 100644
index 2388d5f68..000000000
--- a/website/Makefile
+++ /dev/null
@@ -1,24 +0,0 @@
-VERSION?="0.3.26"
-
-build:
- @echo "==> Starting build in Docker..."
- @docker run \
- --interactive \
- --rm \
- --tty \
- --volume "$(shell pwd):/website" \
- hashicorp/middleman-hashicorp:${VERSION} \
- bundle exec middleman build --verbose --clean
-
-website:
- @echo "==> Starting website in Docker..."
- @docker run \
- --interactive \
- --rm \
- --tty \
- --publish "4567:4567" \
- --publish "35729:35729" \
- --volume "$(shell pwd):/website" \
- hashicorp/middleman-hashicorp:${VERSION}
-
-.PHONY: build website
diff --git a/website/README.md b/website/README.md
deleted file mode 100644
index 4dbad10d2..000000000
--- a/website/README.md
+++ /dev/null
@@ -1,24 +0,0 @@
-# Terraform Website
-
-This subdirectory contains the entire source for the [Terraform
-Website][terraform]. This is a [Middleman][middleman] project, which builds a
-static site from these source files.
-
-## Contributions Welcome!
-
-If you find a typo or you feel like you can improve the HTML, CSS, or
-JavaScript, we welcome contributions. Feel free to open issues or pull requests
-like any normal GitHub project, and we'll merge it in.
-
-## Running the Site Locally
-
-Running the site locally is simple:
-
-1. Install [Docker](https://docs.docker.com/engine/installation/) if you have not already done so
-2. Clone this repo and run `make website`
-
-Then open up `http://localhost:4567`. Note that some URLs you may need to append
-".html" to make them work (in the navigation).
-
-[middleman]: https://www.middlemanapp.com
-[terraform]: https://www.terraform.io
diff --git a/website/config.rb b/website/config.rb
deleted file mode 100644
index 3f2f4dfd8..000000000
--- a/website/config.rb
+++ /dev/null
@@ -1,100 +0,0 @@
-set :base_url, "https://www.terraform.io/"
-
-activate :hashicorp do |h|
- h.name = "terraform"
- h.version = "0.9.8"
- h.github_slug = "hashicorp/terraform"
-end
-
-helpers do
- # Returns the FQDN of the image URL.
- #
- # @param [String] path
- #
- # @return [String]
- def image_url(path)
- File.join(base_url, image_path(path))
- end
-
- # Get the title for the page.
- #
- # @param [Middleman::Page] page
- #
- # @return [String]
- def title_for(page)
- if page && page.data.page_title
- return "#{page.data.page_title} - Terraform by HashiCorp"
- end
-
- "Terraform by HashiCorp"
- end
-
- # Get the description for the page
- #
- # @param [Middleman::Page] page
- #
- # @return [String]
- def description_for(page)
- description = (page.data.description || "")
- .gsub('"', '')
- .gsub(/\n+/, ' ')
- .squeeze(' ')
-
- return escape_html(description)
- end
-
- # This helps by setting the "active" class for sidebar nav elements
- # if the YAML frontmatter matches the expected value.
- def sidebar_current(expected)
- current = current_page.data.sidebar_current || ""
- if current.start_with?(expected)
- return " class=\"active\""
- else
- return ""
- end
- end
-
- # Returns the id for this page.
- # @return [String]
- def body_id_for(page)
- if !(name = page.data.sidebar_current).blank?
- return "page-#{name.strip}"
- end
- if page.url == "/" || page.url == "/index.html"
- return "page-home"
- end
- if !(title = page.data.page_title).blank?
- return title
- .downcase
- .gsub('"', '')
- .gsub(/[^\w]+/, '-')
- .gsub(/_+/, '-')
- .squeeze('-')
- .squeeze(' ')
- end
- return ""
- end
-
- # Returns the list of classes for this page.
- # @return [String]
- def body_classes_for(page)
- classes = []
-
- if !(layout = page.data.layout).blank?
- classes << "layout-#{page.data.layout}"
- end
-
- if !(title = page.data.page_title).blank?
- title = title
- .downcase
- .gsub('"', '')
- .gsub(/[^\w]+/, '-')
- .gsub(/_+/, '-')
- .squeeze('-')
- .squeeze(' ')
- classes << "page-#{title}"
- end
-
- return classes.join(" ")
- end
-end
diff --git a/website/data/news.yml b/website/data/news.yml
deleted file mode 100644
index a00693bf5..000000000
--- a/website/data/news.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-default_link_text: 'Read more'
-
-featured_post:
- # media_html:
- title: 'Webinar: Controlling Your Organization With HashiCorp Terraform and Google Cloud Platform'
- body: Watch our recent webinar with Seth Vargo and Google Cloud. Learn how to build your entire infrastructure across Google Cloud with one command.
- link_url: https://www.youtube.com/watch?v=Ym6DtUx5REg
- link_text: Watch Now
-
-additional_posts:
- -
- title: Terraform 0.9 Released
- body: Terraform 0.9 adds major new functionality to Terraform. Read the highlights from the 0.9 release.
- link_url: https://www.hashicorp.com/blog/terraform-0-9/
- -
- title: 'Webinar: Multi-Cloud, One Command with Terraform'
- body: Watch our recent webinar with Mitchell Hashimoto to learn how Terraform provisions infrastructure across different clouds using a consistent workflow.
- link_url: https://youtu.be/nLg7fpVcIv4
- link_text: Watch Now
diff --git a/website/source/docs/backends/config.html.md b/website/docs/backends/config.html.md
similarity index 100%
rename from website/source/docs/backends/config.html.md
rename to website/docs/backends/config.html.md
diff --git a/website/source/docs/backends/index.html.md b/website/docs/backends/index.html.md
similarity index 100%
rename from website/source/docs/backends/index.html.md
rename to website/docs/backends/index.html.md
diff --git a/website/source/docs/backends/init.html.md b/website/docs/backends/init.html.md
similarity index 100%
rename from website/source/docs/backends/init.html.md
rename to website/docs/backends/init.html.md
diff --git a/website/source/docs/backends/legacy-0-8.html.md b/website/docs/backends/legacy-0-8.html.md
similarity index 100%
rename from website/source/docs/backends/legacy-0-8.html.md
rename to website/docs/backends/legacy-0-8.html.md
diff --git a/website/source/docs/backends/operations.html.md b/website/docs/backends/operations.html.md
similarity index 100%
rename from website/source/docs/backends/operations.html.md
rename to website/docs/backends/operations.html.md
diff --git a/website/source/docs/backends/state.html.md b/website/docs/backends/state.html.md
similarity index 100%
rename from website/source/docs/backends/state.html.md
rename to website/docs/backends/state.html.md
diff --git a/website/source/docs/backends/types/artifactory.html.md b/website/docs/backends/types/artifactory.html.md
similarity index 100%
rename from website/source/docs/backends/types/artifactory.html.md
rename to website/docs/backends/types/artifactory.html.md
diff --git a/website/source/docs/backends/types/azure.html.md b/website/docs/backends/types/azure.html.md
similarity index 100%
rename from website/source/docs/backends/types/azure.html.md
rename to website/docs/backends/types/azure.html.md
diff --git a/website/source/docs/backends/types/consul.html.md b/website/docs/backends/types/consul.html.md
similarity index 100%
rename from website/source/docs/backends/types/consul.html.md
rename to website/docs/backends/types/consul.html.md
diff --git a/website/source/docs/backends/types/etcd.html.md b/website/docs/backends/types/etcd.html.md
similarity index 100%
rename from website/source/docs/backends/types/etcd.html.md
rename to website/docs/backends/types/etcd.html.md
diff --git a/website/source/docs/backends/types/gcs.html.md b/website/docs/backends/types/gcs.html.md
similarity index 100%
rename from website/source/docs/backends/types/gcs.html.md
rename to website/docs/backends/types/gcs.html.md
diff --git a/website/source/docs/backends/types/http.html.md b/website/docs/backends/types/http.html.md
similarity index 100%
rename from website/source/docs/backends/types/http.html.md
rename to website/docs/backends/types/http.html.md
diff --git a/website/source/docs/backends/types/index.html.md b/website/docs/backends/types/index.html.md
similarity index 100%
rename from website/source/docs/backends/types/index.html.md
rename to website/docs/backends/types/index.html.md
diff --git a/website/source/docs/backends/types/local.html.md b/website/docs/backends/types/local.html.md
similarity index 100%
rename from website/source/docs/backends/types/local.html.md
rename to website/docs/backends/types/local.html.md
diff --git a/website/source/docs/backends/types/manta.html.md b/website/docs/backends/types/manta.html.md
similarity index 100%
rename from website/source/docs/backends/types/manta.html.md
rename to website/docs/backends/types/manta.html.md
diff --git a/website/source/docs/backends/types/s3.html.md b/website/docs/backends/types/s3.html.md
similarity index 100%
rename from website/source/docs/backends/types/s3.html.md
rename to website/docs/backends/types/s3.html.md
diff --git a/website/source/docs/backends/types/swift.html.md b/website/docs/backends/types/swift.html.md
similarity index 100%
rename from website/source/docs/backends/types/swift.html.md
rename to website/docs/backends/types/swift.html.md
diff --git a/website/source/docs/backends/types/terraform-enterprise.html.md b/website/docs/backends/types/terraform-enterprise.html.md
similarity index 100%
rename from website/source/docs/backends/types/terraform-enterprise.html.md
rename to website/docs/backends/types/terraform-enterprise.html.md
diff --git a/website/source/docs/commands/apply.html.markdown b/website/docs/commands/apply.html.markdown
similarity index 100%
rename from website/source/docs/commands/apply.html.markdown
rename to website/docs/commands/apply.html.markdown
diff --git a/website/source/docs/commands/console.html.markdown b/website/docs/commands/console.html.markdown
similarity index 100%
rename from website/source/docs/commands/console.html.markdown
rename to website/docs/commands/console.html.markdown
diff --git a/website/source/docs/commands/destroy.html.markdown b/website/docs/commands/destroy.html.markdown
similarity index 100%
rename from website/source/docs/commands/destroy.html.markdown
rename to website/docs/commands/destroy.html.markdown
diff --git a/website/source/docs/commands/env.html.markdown b/website/docs/commands/env.html.markdown
similarity index 100%
rename from website/source/docs/commands/env.html.markdown
rename to website/docs/commands/env.html.markdown
diff --git a/website/source/docs/commands/fmt.html.markdown b/website/docs/commands/fmt.html.markdown
similarity index 100%
rename from website/source/docs/commands/fmt.html.markdown
rename to website/docs/commands/fmt.html.markdown
diff --git a/website/source/docs/commands/force-unlock.html.markdown b/website/docs/commands/force-unlock.html.markdown
similarity index 100%
rename from website/source/docs/commands/force-unlock.html.markdown
rename to website/docs/commands/force-unlock.html.markdown
diff --git a/website/source/docs/commands/get.html.markdown b/website/docs/commands/get.html.markdown
similarity index 100%
rename from website/source/docs/commands/get.html.markdown
rename to website/docs/commands/get.html.markdown
diff --git a/website/source/docs/commands/graph.html.markdown b/website/docs/commands/graph.html.markdown
similarity index 100%
rename from website/source/docs/commands/graph.html.markdown
rename to website/docs/commands/graph.html.markdown
diff --git a/website/source/docs/commands/import.html.md b/website/docs/commands/import.html.md
similarity index 100%
rename from website/source/docs/commands/import.html.md
rename to website/docs/commands/import.html.md
diff --git a/website/source/docs/commands/index.html.markdown b/website/docs/commands/index.html.markdown
similarity index 100%
rename from website/source/docs/commands/index.html.markdown
rename to website/docs/commands/index.html.markdown
diff --git a/website/source/docs/commands/init.html.markdown b/website/docs/commands/init.html.markdown
similarity index 100%
rename from website/source/docs/commands/init.html.markdown
rename to website/docs/commands/init.html.markdown
diff --git a/website/source/docs/commands/output.html.markdown b/website/docs/commands/output.html.markdown
similarity index 100%
rename from website/source/docs/commands/output.html.markdown
rename to website/docs/commands/output.html.markdown
diff --git a/website/source/docs/commands/plan.html.markdown b/website/docs/commands/plan.html.markdown
similarity index 100%
rename from website/source/docs/commands/plan.html.markdown
rename to website/docs/commands/plan.html.markdown
diff --git a/website/source/docs/commands/providers.html.markdown b/website/docs/commands/providers.html.markdown
similarity index 100%
rename from website/source/docs/commands/providers.html.markdown
rename to website/docs/commands/providers.html.markdown
diff --git a/website/source/docs/commands/push.html.markdown b/website/docs/commands/push.html.markdown
similarity index 100%
rename from website/source/docs/commands/push.html.markdown
rename to website/docs/commands/push.html.markdown
diff --git a/website/source/docs/commands/refresh.html.markdown b/website/docs/commands/refresh.html.markdown
similarity index 100%
rename from website/source/docs/commands/refresh.html.markdown
rename to website/docs/commands/refresh.html.markdown
diff --git a/website/source/docs/commands/show.html.markdown b/website/docs/commands/show.html.markdown
similarity index 100%
rename from website/source/docs/commands/show.html.markdown
rename to website/docs/commands/show.html.markdown
diff --git a/website/source/docs/commands/state/addressing.html.md b/website/docs/commands/state/addressing.html.md
similarity index 100%
rename from website/source/docs/commands/state/addressing.html.md
rename to website/docs/commands/state/addressing.html.md
diff --git a/website/source/docs/commands/state/index.html.md b/website/docs/commands/state/index.html.md
similarity index 100%
rename from website/source/docs/commands/state/index.html.md
rename to website/docs/commands/state/index.html.md
diff --git a/website/source/docs/commands/state/list.html.md b/website/docs/commands/state/list.html.md
similarity index 100%
rename from website/source/docs/commands/state/list.html.md
rename to website/docs/commands/state/list.html.md
diff --git a/website/source/docs/commands/state/mv.html.md b/website/docs/commands/state/mv.html.md
similarity index 100%
rename from website/source/docs/commands/state/mv.html.md
rename to website/docs/commands/state/mv.html.md
diff --git a/website/source/docs/commands/state/pull.html.md b/website/docs/commands/state/pull.html.md
similarity index 100%
rename from website/source/docs/commands/state/pull.html.md
rename to website/docs/commands/state/pull.html.md
diff --git a/website/source/docs/commands/state/push.html.md b/website/docs/commands/state/push.html.md
similarity index 100%
rename from website/source/docs/commands/state/push.html.md
rename to website/docs/commands/state/push.html.md
diff --git a/website/source/docs/commands/state/rm.html.md b/website/docs/commands/state/rm.html.md
similarity index 100%
rename from website/source/docs/commands/state/rm.html.md
rename to website/docs/commands/state/rm.html.md
diff --git a/website/source/docs/commands/state/show.html.md b/website/docs/commands/state/show.html.md
similarity index 100%
rename from website/source/docs/commands/state/show.html.md
rename to website/docs/commands/state/show.html.md
diff --git a/website/source/docs/commands/taint.html.markdown b/website/docs/commands/taint.html.markdown
similarity index 100%
rename from website/source/docs/commands/taint.html.markdown
rename to website/docs/commands/taint.html.markdown
diff --git a/website/source/docs/commands/untaint.html.markdown b/website/docs/commands/untaint.html.markdown
similarity index 100%
rename from website/source/docs/commands/untaint.html.markdown
rename to website/docs/commands/untaint.html.markdown
diff --git a/website/source/docs/commands/validate.html.markdown b/website/docs/commands/validate.html.markdown
similarity index 100%
rename from website/source/docs/commands/validate.html.markdown
rename to website/docs/commands/validate.html.markdown
diff --git a/website/source/docs/commands/workspace/delete.html.md b/website/docs/commands/workspace/delete.html.md
similarity index 100%
rename from website/source/docs/commands/workspace/delete.html.md
rename to website/docs/commands/workspace/delete.html.md
diff --git a/website/source/docs/commands/workspace/index.html.md b/website/docs/commands/workspace/index.html.md
similarity index 100%
rename from website/source/docs/commands/workspace/index.html.md
rename to website/docs/commands/workspace/index.html.md
diff --git a/website/source/docs/commands/workspace/list.html.md b/website/docs/commands/workspace/list.html.md
similarity index 100%
rename from website/source/docs/commands/workspace/list.html.md
rename to website/docs/commands/workspace/list.html.md
diff --git a/website/source/docs/commands/workspace/new.html.md b/website/docs/commands/workspace/new.html.md
similarity index 100%
rename from website/source/docs/commands/workspace/new.html.md
rename to website/docs/commands/workspace/new.html.md
diff --git a/website/source/docs/commands/workspace/select.html.md b/website/docs/commands/workspace/select.html.md
similarity index 100%
rename from website/source/docs/commands/workspace/select.html.md
rename to website/docs/commands/workspace/select.html.md
diff --git a/website/source/docs/configuration/data-sources.html.md b/website/docs/configuration/data-sources.html.md
similarity index 100%
rename from website/source/docs/configuration/data-sources.html.md
rename to website/docs/configuration/data-sources.html.md
diff --git a/website/source/docs/configuration/environment-variables.html.md b/website/docs/configuration/environment-variables.html.md
similarity index 100%
rename from website/source/docs/configuration/environment-variables.html.md
rename to website/docs/configuration/environment-variables.html.md
diff --git a/website/source/docs/configuration/index.html.md b/website/docs/configuration/index.html.md
similarity index 100%
rename from website/source/docs/configuration/index.html.md
rename to website/docs/configuration/index.html.md
diff --git a/website/source/docs/configuration/interpolation.html.md b/website/docs/configuration/interpolation.html.md
similarity index 100%
rename from website/source/docs/configuration/interpolation.html.md
rename to website/docs/configuration/interpolation.html.md
diff --git a/website/source/docs/configuration/load.html.md b/website/docs/configuration/load.html.md
similarity index 100%
rename from website/source/docs/configuration/load.html.md
rename to website/docs/configuration/load.html.md
diff --git a/website/source/docs/configuration/modules.html.md b/website/docs/configuration/modules.html.md
similarity index 100%
rename from website/source/docs/configuration/modules.html.md
rename to website/docs/configuration/modules.html.md
diff --git a/website/source/docs/configuration/outputs.html.md b/website/docs/configuration/outputs.html.md
similarity index 100%
rename from website/source/docs/configuration/outputs.html.md
rename to website/docs/configuration/outputs.html.md
diff --git a/website/source/docs/configuration/override.html.md b/website/docs/configuration/override.html.md
similarity index 100%
rename from website/source/docs/configuration/override.html.md
rename to website/docs/configuration/override.html.md
diff --git a/website/source/docs/configuration/providers.html.md b/website/docs/configuration/providers.html.md
similarity index 100%
rename from website/source/docs/configuration/providers.html.md
rename to website/docs/configuration/providers.html.md
diff --git a/website/source/docs/configuration/resources.html.md b/website/docs/configuration/resources.html.md
similarity index 100%
rename from website/source/docs/configuration/resources.html.md
rename to website/docs/configuration/resources.html.md
diff --git a/website/source/docs/configuration/syntax.html.md b/website/docs/configuration/syntax.html.md
similarity index 100%
rename from website/source/docs/configuration/syntax.html.md
rename to website/docs/configuration/syntax.html.md
diff --git a/website/source/docs/configuration/terraform-enterprise.html.md b/website/docs/configuration/terraform-enterprise.html.md
similarity index 100%
rename from website/source/docs/configuration/terraform-enterprise.html.md
rename to website/docs/configuration/terraform-enterprise.html.md
diff --git a/website/source/docs/configuration/terraform.html.md b/website/docs/configuration/terraform.html.md
similarity index 100%
rename from website/source/docs/configuration/terraform.html.md
rename to website/docs/configuration/terraform.html.md
diff --git a/website/source/docs/configuration/variables.html.md b/website/docs/configuration/variables.html.md
similarity index 100%
rename from website/source/docs/configuration/variables.html.md
rename to website/docs/configuration/variables.html.md
diff --git a/website/source/docs/import/importability.html.md b/website/docs/import/importability.html.md
similarity index 100%
rename from website/source/docs/import/importability.html.md
rename to website/docs/import/importability.html.md
diff --git a/website/source/docs/import/index.html.md b/website/docs/import/index.html.md
similarity index 100%
rename from website/source/docs/import/index.html.md
rename to website/docs/import/index.html.md
diff --git a/website/source/docs/import/usage.html.md b/website/docs/import/usage.html.md
similarity index 100%
rename from website/source/docs/import/usage.html.md
rename to website/docs/import/usage.html.md
diff --git a/website/source/docs/index.html.markdown b/website/docs/index.html.markdown
similarity index 100%
rename from website/source/docs/index.html.markdown
rename to website/docs/index.html.markdown
diff --git a/website/source/docs/internals/debugging.html.md b/website/docs/internals/debugging.html.md
similarity index 100%
rename from website/source/docs/internals/debugging.html.md
rename to website/docs/internals/debugging.html.md
diff --git a/website/source/docs/internals/graph.html.md b/website/docs/internals/graph.html.md
similarity index 100%
rename from website/source/docs/internals/graph.html.md
rename to website/docs/internals/graph.html.md
diff --git a/website/source/docs/internals/index.html.md b/website/docs/internals/index.html.md
similarity index 100%
rename from website/source/docs/internals/index.html.md
rename to website/docs/internals/index.html.md
diff --git a/website/source/docs/internals/internal-plugins.html.md b/website/docs/internals/internal-plugins.html.md
similarity index 100%
rename from website/source/docs/internals/internal-plugins.html.md
rename to website/docs/internals/internal-plugins.html.md
diff --git a/website/source/docs/internals/lifecycle.html.md b/website/docs/internals/lifecycle.html.md
similarity index 100%
rename from website/source/docs/internals/lifecycle.html.md
rename to website/docs/internals/lifecycle.html.md
diff --git a/website/source/docs/internals/resource-addressing.html.markdown b/website/docs/internals/resource-addressing.html.markdown
similarity index 100%
rename from website/source/docs/internals/resource-addressing.html.markdown
rename to website/docs/internals/resource-addressing.html.markdown
diff --git a/website/source/docs/modules/create.html.markdown b/website/docs/modules/create.html.markdown
similarity index 100%
rename from website/source/docs/modules/create.html.markdown
rename to website/docs/modules/create.html.markdown
diff --git a/website/source/docs/modules/index.html.markdown b/website/docs/modules/index.html.markdown
similarity index 100%
rename from website/source/docs/modules/index.html.markdown
rename to website/docs/modules/index.html.markdown
diff --git a/website/source/docs/modules/sources.html.markdown b/website/docs/modules/sources.html.markdown
similarity index 100%
rename from website/source/docs/modules/sources.html.markdown
rename to website/docs/modules/sources.html.markdown
diff --git a/website/source/docs/modules/usage.html.markdown b/website/docs/modules/usage.html.markdown
similarity index 100%
rename from website/source/docs/modules/usage.html.markdown
rename to website/docs/modules/usage.html.markdown
diff --git a/website/source/docs/plugins/basics.html.md b/website/docs/plugins/basics.html.md
similarity index 100%
rename from website/source/docs/plugins/basics.html.md
rename to website/docs/plugins/basics.html.md
diff --git a/website/source/docs/plugins/index.html.md b/website/docs/plugins/index.html.md
similarity index 100%
rename from website/source/docs/plugins/index.html.md
rename to website/docs/plugins/index.html.md
diff --git a/website/source/docs/plugins/provider.html.md b/website/docs/plugins/provider.html.md
similarity index 100%
rename from website/source/docs/plugins/provider.html.md
rename to website/docs/plugins/provider.html.md
diff --git a/website/source/docs/providers/index.html.markdown b/website/docs/providers/index.html.markdown
similarity index 100%
rename from website/source/docs/providers/index.html.markdown
rename to website/docs/providers/index.html.markdown
diff --git a/website/source/docs/provisioners/chef.html.markdown b/website/docs/provisioners/chef.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/chef.html.markdown
rename to website/docs/provisioners/chef.html.markdown
diff --git a/website/source/docs/provisioners/connection.html.markdown b/website/docs/provisioners/connection.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/connection.html.markdown
rename to website/docs/provisioners/connection.html.markdown
diff --git a/website/source/docs/provisioners/file.html.markdown b/website/docs/provisioners/file.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/file.html.markdown
rename to website/docs/provisioners/file.html.markdown
diff --git a/website/source/docs/provisioners/index.html.markdown b/website/docs/provisioners/index.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/index.html.markdown
rename to website/docs/provisioners/index.html.markdown
diff --git a/website/source/docs/provisioners/local-exec.html.markdown b/website/docs/provisioners/local-exec.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/local-exec.html.markdown
rename to website/docs/provisioners/local-exec.html.markdown
diff --git a/website/source/docs/provisioners/null_resource.html.markdown b/website/docs/provisioners/null_resource.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/null_resource.html.markdown
rename to website/docs/provisioners/null_resource.html.markdown
diff --git a/website/source/docs/provisioners/remote-exec.html.markdown b/website/docs/provisioners/remote-exec.html.markdown
similarity index 100%
rename from website/source/docs/provisioners/remote-exec.html.markdown
rename to website/docs/provisioners/remote-exec.html.markdown
diff --git a/website/source/docs/state/environments.html.md b/website/docs/state/environments.html.md
similarity index 100%
rename from website/source/docs/state/environments.html.md
rename to website/docs/state/environments.html.md
diff --git a/website/source/docs/state/import.html.md b/website/docs/state/import.html.md
similarity index 100%
rename from website/source/docs/state/import.html.md
rename to website/docs/state/import.html.md
diff --git a/website/source/docs/state/index.html.md b/website/docs/state/index.html.md
similarity index 100%
rename from website/source/docs/state/index.html.md
rename to website/docs/state/index.html.md
diff --git a/website/source/docs/state/locking.html.md b/website/docs/state/locking.html.md
similarity index 100%
rename from website/source/docs/state/locking.html.md
rename to website/docs/state/locking.html.md
diff --git a/website/source/docs/state/purpose.html.md b/website/docs/state/purpose.html.md
similarity index 100%
rename from website/source/docs/state/purpose.html.md
rename to website/docs/state/purpose.html.md
diff --git a/website/source/docs/state/remote.html.md b/website/docs/state/remote.html.md
similarity index 100%
rename from website/source/docs/state/remote.html.md
rename to website/docs/state/remote.html.md
diff --git a/website/source/docs/state/sensitive-data.html.md b/website/docs/state/sensitive-data.html.md
similarity index 100%
rename from website/source/docs/state/sensitive-data.html.md
rename to website/docs/state/sensitive-data.html.md
diff --git a/website/source/docs/state/workspaces.html.md b/website/docs/state/workspaces.html.md
similarity index 100%
rename from website/source/docs/state/workspaces.html.md
rename to website/docs/state/workspaces.html.md
diff --git a/website/source/guides/index.html.md b/website/guides/index.html.md
similarity index 100%
rename from website/source/guides/index.html.md
rename to website/guides/index.html.md
diff --git a/website/source/guides/writing-custom-terraform-providers.html.md b/website/guides/writing-custom-terraform-providers.html.md
similarity index 100%
rename from website/source/guides/writing-custom-terraform-providers.html.md
rename to website/guides/writing-custom-terraform-providers.html.md
diff --git a/website/source/intro/examples/aws.html.markdown b/website/intro/examples/aws.html.markdown
similarity index 100%
rename from website/source/intro/examples/aws.html.markdown
rename to website/intro/examples/aws.html.markdown
diff --git a/website/source/intro/examples/consul.html.markdown b/website/intro/examples/consul.html.markdown
similarity index 100%
rename from website/source/intro/examples/consul.html.markdown
rename to website/intro/examples/consul.html.markdown
diff --git a/website/source/intro/examples/count.markdown b/website/intro/examples/count.markdown
similarity index 100%
rename from website/source/intro/examples/count.markdown
rename to website/intro/examples/count.markdown
diff --git a/website/source/intro/examples/cross-provider.markdown b/website/intro/examples/cross-provider.markdown
similarity index 100%
rename from website/source/intro/examples/cross-provider.markdown
rename to website/intro/examples/cross-provider.markdown
diff --git a/website/source/intro/examples/index.html.markdown b/website/intro/examples/index.html.markdown
similarity index 100%
rename from website/source/intro/examples/index.html.markdown
rename to website/intro/examples/index.html.markdown
diff --git a/website/source/intro/getting-started/build.html.md b/website/intro/getting-started/build.html.md
similarity index 100%
rename from website/source/intro/getting-started/build.html.md
rename to website/intro/getting-started/build.html.md
diff --git a/website/source/intro/getting-started/change.html.md b/website/intro/getting-started/change.html.md
similarity index 100%
rename from website/source/intro/getting-started/change.html.md
rename to website/intro/getting-started/change.html.md
diff --git a/website/source/intro/getting-started/dependencies.html.md b/website/intro/getting-started/dependencies.html.md
similarity index 100%
rename from website/source/intro/getting-started/dependencies.html.md
rename to website/intro/getting-started/dependencies.html.md
diff --git a/website/source/intro/getting-started/destroy.html.md b/website/intro/getting-started/destroy.html.md
similarity index 100%
rename from website/source/intro/getting-started/destroy.html.md
rename to website/intro/getting-started/destroy.html.md
diff --git a/website/source/intro/getting-started/install.html.markdown b/website/intro/getting-started/install.html.markdown
similarity index 100%
rename from website/source/intro/getting-started/install.html.markdown
rename to website/intro/getting-started/install.html.markdown
diff --git a/website/source/intro/getting-started/modules.html.md b/website/intro/getting-started/modules.html.md
similarity index 100%
rename from website/source/intro/getting-started/modules.html.md
rename to website/intro/getting-started/modules.html.md
diff --git a/website/source/intro/getting-started/next-steps.html.markdown b/website/intro/getting-started/next-steps.html.markdown
similarity index 100%
rename from website/source/intro/getting-started/next-steps.html.markdown
rename to website/intro/getting-started/next-steps.html.markdown
diff --git a/website/source/intro/getting-started/outputs.html.md b/website/intro/getting-started/outputs.html.md
similarity index 100%
rename from website/source/intro/getting-started/outputs.html.md
rename to website/intro/getting-started/outputs.html.md
diff --git a/website/source/intro/getting-started/provision.html.md b/website/intro/getting-started/provision.html.md
similarity index 100%
rename from website/source/intro/getting-started/provision.html.md
rename to website/intro/getting-started/provision.html.md
diff --git a/website/source/intro/getting-started/remote.html.markdown b/website/intro/getting-started/remote.html.markdown
similarity index 100%
rename from website/source/intro/getting-started/remote.html.markdown
rename to website/intro/getting-started/remote.html.markdown
diff --git a/website/source/intro/getting-started/variables.html.md b/website/intro/getting-started/variables.html.md
similarity index 100%
rename from website/source/intro/getting-started/variables.html.md
rename to website/intro/getting-started/variables.html.md
diff --git a/website/source/intro/index.html.markdown b/website/intro/index.html.markdown
similarity index 100%
rename from website/source/intro/index.html.markdown
rename to website/intro/index.html.markdown
diff --git a/website/source/intro/use-cases.html.markdown b/website/intro/use-cases.html.markdown
similarity index 100%
rename from website/source/intro/use-cases.html.markdown
rename to website/intro/use-cases.html.markdown
diff --git a/website/source/intro/vs/boto.html.markdown b/website/intro/vs/boto.html.markdown
similarity index 100%
rename from website/source/intro/vs/boto.html.markdown
rename to website/intro/vs/boto.html.markdown
diff --git a/website/source/intro/vs/chef-puppet.html.markdown b/website/intro/vs/chef-puppet.html.markdown
similarity index 100%
rename from website/source/intro/vs/chef-puppet.html.markdown
rename to website/intro/vs/chef-puppet.html.markdown
diff --git a/website/source/intro/vs/cloudformation.html.markdown b/website/intro/vs/cloudformation.html.markdown
similarity index 100%
rename from website/source/intro/vs/cloudformation.html.markdown
rename to website/intro/vs/cloudformation.html.markdown
diff --git a/website/source/intro/vs/custom.html.markdown b/website/intro/vs/custom.html.markdown
similarity index 100%
rename from website/source/intro/vs/custom.html.markdown
rename to website/intro/vs/custom.html.markdown
diff --git a/website/source/intro/vs/index.html.markdown b/website/intro/vs/index.html.markdown
similarity index 100%
rename from website/source/intro/vs/index.html.markdown
rename to website/intro/vs/index.html.markdown
diff --git a/website/source/layouts/backend-types.erb b/website/layouts/backend-types.erb
similarity index 100%
rename from website/source/layouts/backend-types.erb
rename to website/layouts/backend-types.erb
diff --git a/website/source/layouts/commands-state.erb b/website/layouts/commands-state.erb
similarity index 100%
rename from website/source/layouts/commands-state.erb
rename to website/layouts/commands-state.erb
diff --git a/website/source/layouts/commands-workspace.erb b/website/layouts/commands-workspace.erb
similarity index 100%
rename from website/source/layouts/commands-workspace.erb
rename to website/layouts/commands-workspace.erb
diff --git a/website/source/layouts/docs.erb b/website/layouts/docs.erb
similarity index 100%
rename from website/source/layouts/docs.erb
rename to website/layouts/docs.erb
diff --git a/website/source/layouts/downloads.erb b/website/layouts/downloads.erb
similarity index 100%
rename from website/source/layouts/downloads.erb
rename to website/layouts/downloads.erb
diff --git a/website/source/layouts/guides.erb b/website/layouts/guides.erb
similarity index 100%
rename from website/source/layouts/guides.erb
rename to website/layouts/guides.erb
diff --git a/website/source/layouts/intro.erb b/website/layouts/intro.erb
similarity index 100%
rename from website/source/layouts/intro.erb
rename to website/layouts/intro.erb
diff --git a/website/packer.json b/website/packer.json
deleted file mode 100644
index 76c536bcf..000000000
--- a/website/packer.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
- "variables": {
- "aws_access_key_id": "{{ env `AWS_ACCESS_KEY_ID` }}",
- "aws_secret_access_key": "{{ env `AWS_SECRET_ACCESS_KEY` }}",
- "aws_region": "{{ env `AWS_REGION` }}",
- "fastly_api_key": "{{ env `FASTLY_API_KEY` }}"
- },
- "builders": [
- {
- "type": "docker",
- "image": "hashicorp/middleman-hashicorp:0.3.26",
- "discard": "true",
- "run_command": ["-d", "-i", "-t", "{{ .Image }}", "/bin/sh"]
- }
- ],
- "provisioners": [
- {
- "type": "file",
- "source": ".",
- "destination": "/website"
- },
- {
- "type": "shell",
- "environment_vars": [
- "AWS_ACCESS_KEY_ID={{ user `aws_access_key_id` }}",
- "AWS_SECRET_ACCESS_KEY={{ user `aws_secret_access_key` }}",
- "AWS_REGION={{ user `aws_region` }}",
- "FASTLY_API_KEY={{ user `fastly_api_key` }}"
- ],
- "inline": [
- "bundle check || bundle install",
- "bundle exec middleman build --verbose",
- "/bin/sh ./scripts/deploy.sh"
- ]
- }
- ]
-}
diff --git a/website/scripts/deploy.sh b/website/scripts/deploy.sh
deleted file mode 100755
index 9eef86c84..000000000
--- a/website/scripts/deploy.sh
+++ /dev/null
@@ -1,125 +0,0 @@
-#!/bin/sh
-set -e
-
-PROJECT="terraform"
-PROJECT_URL="www.terraform.io"
-FASTLY_SERVICE_ID="7GrxRJP3PVBuqQbyxYQ0MV"
-
-# Ensure the proper AWS environment variables are set
-if [ -z "$AWS_ACCESS_KEY_ID" ]; then
- echo "Missing AWS_ACCESS_KEY_ID!"
- exit 1
-fi
-
-if [ -z "$AWS_SECRET_ACCESS_KEY" ]; then
- echo "Missing AWS_SECRET_ACCESS_KEY!"
- exit 1
-fi
-
-# Ensure the proper Fastly keys are set
-if [ -z "$FASTLY_API_KEY" ]; then
- echo "Missing FASTLY_API_KEY!"
- exit 1
-fi
-
-# Ensure we have s3cmd installed
-if ! command -v "s3cmd" >/dev/null 2>&1; then
- echo "Missing s3cmd!"
- exit 1
-fi
-
-# Get the parent directory of where this script is and cd there
-DIR="$(cd "$(dirname "$(readlink -f "$0")")/.." && pwd)"
-
-# Delete any .DS_Store files for our OS X friends.
-find "$DIR" -type f -name '.DS_Store' -delete
-
-# Upload the files to S3 - we disable mime-type detection by the python library
-# and just guess from the file extension because it's surprisingly more
-# accurate, especially for CSS and javascript. We also tag the uploaded files
-# with the proper Surrogate-Key, which we will later purge in our API call to
-# Fastly.
-if [ -z "$NO_UPLOAD" ]; then
- echo "Uploading to S3..."
-
- # Check that the site has been built
- if [ ! -d "$DIR/build" ]; then
- echo "Missing compiled website! Run 'make build' to compile!"
- exit 1
- fi
-
- # Set browser-side cache-control to ~4h, but tell Fastly to cache for much
- # longer. We manually purge the Fastly cache, so setting it to a year is more
- # than fine.
- s3cmd \
- --quiet \
- --delete-removed \
- --guess-mime-type \
- --no-mime-magic \
- --acl-public \
- --recursive \
- --add-header="Cache-Control: max-age=14400" \
- --add-header="x-amz-meta-surrogate-control: max-age=31536000, stale-white-revalidate=86400, stale-if-error=604800" \
- --add-header="x-amz-meta-surrogate-key: site-$PROJECT" \
- sync "$DIR/build/" "s3://hc-sites/$PROJECT/latest/"
-
- # The s3cmd guessed mime type for text files is often wrong. This is
- # problematic for some assets, so force their mime types to be correct.
- echo "Overriding javascript mime-types..."
- s3cmd \
- --mime-type="application/javascript" \
- --add-header="Cache-Control: max-age=31536000" \
- --exclude "*" \
- --include "*.js" \
- --recursive \
- modify "s3://hc-sites/$PROJECT/latest/"
-
- echo "Overriding css mime-types..."
- s3cmd \
- --mime-type="text/css" \
- --add-header="Cache-Control: max-age=31536000" \
- --exclude "*" \
- --include "*.css" \
- --recursive \
- modify "s3://hc-sites/$PROJECT/latest/"
-
- echo "Overriding svg mime-types..."
- s3cmd \
- --mime-type="image/svg+xml" \
- --add-header="Cache-Control: max-age=31536000" \
- --exclude "*" \
- --include "*.svg" \
- --recursive \
- modify "s3://hc-sites/$PROJECT/latest/"
-fi
-
-# Perform a purge of the surrogate key.
-if [ -z "$NO_PURGE" ]; then
- echo "Purging Fastly cache..."
- curl \
- --fail \
- --silent \
- --output /dev/null \
- --request "POST" \
- --header "Accept: application/json" \
- --header "Fastly-Key: $FASTLY_API_KEY" \
- --header "Fastly-Soft-Purge: 1" \
- "https://api.fastly.com/service/$FASTLY_SERVICE_ID/purge/site-$PROJECT"
-fi
-
-# Warm the cache with recursive wget.
-if [ -z "$NO_WARM" ]; then
- echo "Warming Fastly cache..."
- echo ""
- echo "If this step fails, there are likely missing or broken assets or links"
- echo "on the website. Run the following command manually on your laptop, and"
- echo "search for \"ERROR\" in the output:"
- echo ""
- echo "wget --recursive --delete-after https://$PROJECT_URL/"
- echo ""
- wget \
- --recursive \
- --delete-after \
- --quiet \
- "https://$PROJECT_URL/"
-fi
diff --git a/website/source/404.html.md b/website/source/404.html.md
deleted file mode 100644
index e99ce088b..000000000
--- a/website/source/404.html.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: "inner"
-page_title: "Not Found"
-noindex: true
-description: |-
- Page not found!
----
-
-# Page Not Found
-
-Sorry, the page you tried to visit does not exist. This could be our fault,
-and if so we will fix that up right away.
-
-Please go back, or go back to get back on track.
diff --git a/website/source/android-manifest.json.erb b/website/source/android-manifest.json.erb
deleted file mode 100644
index 224636a38..000000000
--- a/website/source/android-manifest.json.erb
+++ /dev/null
@@ -1,18 +0,0 @@
-{
- "name": "HashiCorp Terraform",
- "icons": [
- {
- "src": "<%= image_path('favicons/android-chrome-192x192.png') %>",
- "sizes": "192x192",
- "type": "image/png"
- },
- {
- "src": "<%= image_path('favicons/android-chrome-512x512.png') %>",
- "sizes": "512x512",
- "type": "image/png"
- }
- ],
- "theme_color": "#ffffff",
- "background_color": "#ffffff",
- "display": "standalone"
-}
diff --git a/website/source/assets/files/press-kit.zip b/website/source/assets/files/press-kit.zip
deleted file mode 100644
index dde265f79..000000000
Binary files a/website/source/assets/files/press-kit.zip and /dev/null differ
diff --git a/website/source/assets/images/docs/graph-example.png b/website/source/assets/images/docs/graph-example.png
deleted file mode 100644
index 0dbd6060a..000000000
Binary files a/website/source/assets/images/docs/graph-example.png and /dev/null differ
diff --git a/website/source/assets/images/docs/module_graph.png b/website/source/assets/images/docs/module_graph.png
deleted file mode 100644
index 482f4bb55..000000000
Binary files a/website/source/assets/images/docs/module_graph.png and /dev/null differ
diff --git a/website/source/assets/images/docs/module_graph_expand.png b/website/source/assets/images/docs/module_graph_expand.png
deleted file mode 100644
index 32459c873..000000000
Binary files a/website/source/assets/images/docs/module_graph_expand.png and /dev/null differ
diff --git a/website/source/assets/images/docs/tfe-organization-variables.png b/website/source/assets/images/docs/tfe-organization-variables.png
deleted file mode 100644
index 0980c60c0..000000000
Binary files a/website/source/assets/images/docs/tfe-organization-variables.png and /dev/null differ
diff --git a/website/source/assets/images/docs/tfe-variables.png b/website/source/assets/images/docs/tfe-variables.png
deleted file mode 100644
index 7b0e735a0..000000000
Binary files a/website/source/assets/images/docs/tfe-variables.png and /dev/null differ
diff --git a/website/source/assets/images/enterprise-callout-bg copy.svg b/website/source/assets/images/enterprise-callout-bg copy.svg
deleted file mode 100644
index 14a97072a..000000000
--- a/website/source/assets/images/enterprise-callout-bg copy.svg
+++ /dev/null
@@ -1,18 +0,0 @@
-
diff --git a/website/source/assets/images/enterprise-callout-bg.svg b/website/source/assets/images/enterprise-callout-bg.svg
deleted file mode 100644
index ee4ba0afc..000000000
--- a/website/source/assets/images/enterprise-callout-bg.svg
+++ /dev/null
@@ -1,8 +0,0 @@
-
diff --git a/website/source/assets/images/favicons/android-chrome-192x192.png b/website/source/assets/images/favicons/android-chrome-192x192.png
deleted file mode 100644
index c35bdbed5..000000000
Binary files a/website/source/assets/images/favicons/android-chrome-192x192.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/android-chrome-512x512.png b/website/source/assets/images/favicons/android-chrome-512x512.png
deleted file mode 100644
index 83a03ab49..000000000
Binary files a/website/source/assets/images/favicons/android-chrome-512x512.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/apple-touch-icon.png b/website/source/assets/images/favicons/apple-touch-icon.png
deleted file mode 100644
index 91cb96176..000000000
Binary files a/website/source/assets/images/favicons/apple-touch-icon.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/favicon-16x16.png b/website/source/assets/images/favicons/favicon-16x16.png
deleted file mode 100644
index a474f146f..000000000
Binary files a/website/source/assets/images/favicons/favicon-16x16.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/favicon-32x32.png b/website/source/assets/images/favicons/favicon-32x32.png
deleted file mode 100644
index 576c5402f..000000000
Binary files a/website/source/assets/images/favicons/favicon-32x32.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/favicon.ico b/website/source/assets/images/favicons/favicon.ico
deleted file mode 100644
index fd3d1442e..000000000
Binary files a/website/source/assets/images/favicons/favicon.ico and /dev/null differ
diff --git a/website/source/assets/images/favicons/mstile-150x150.png b/website/source/assets/images/favicons/mstile-150x150.png
deleted file mode 100644
index d81ed5df7..000000000
Binary files a/website/source/assets/images/favicons/mstile-150x150.png and /dev/null differ
diff --git a/website/source/assets/images/favicons/safari-pinned-tab.svg b/website/source/assets/images/favicons/safari-pinned-tab.svg
deleted file mode 100644
index d331c170c..000000000
--- a/website/source/assets/images/favicons/safari-pinned-tab.svg
+++ /dev/null
@@ -1,22 +0,0 @@
-
-
-
diff --git a/website/source/assets/images/feature-card-create.svg b/website/source/assets/images/feature-card-create.svg
deleted file mode 100644
index c45fc221e..000000000
--- a/website/source/assets/images/feature-card-create.svg
+++ /dev/null
@@ -1,6 +0,0 @@
-
diff --git a/website/source/assets/images/feature-card-plan.svg b/website/source/assets/images/feature-card-plan.svg
deleted file mode 100644
index c1a010733..000000000
--- a/website/source/assets/images/feature-card-plan.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/website/source/assets/images/feature-card-write.svg b/website/source/assets/images/feature-card-write.svg
deleted file mode 100644
index 9be021d9a..000000000
--- a/website/source/assets/images/feature-card-write.svg
+++ /dev/null
@@ -1,3 +0,0 @@
-
diff --git a/website/source/assets/images/feature-create-bg.svg b/website/source/assets/images/feature-create-bg.svg
deleted file mode 100644
index 0392a2c6a..000000000
--- a/website/source/assets/images/feature-create-bg.svg
+++ /dev/null
@@ -1,45 +0,0 @@
-
diff --git a/website/source/assets/images/feature-plan-bg.svg b/website/source/assets/images/feature-plan-bg.svg
deleted file mode 100644
index 9d9e3e012..000000000
--- a/website/source/assets/images/feature-plan-bg.svg
+++ /dev/null
@@ -1,30 +0,0 @@
-
diff --git a/website/source/assets/images/feature-write-bg.svg b/website/source/assets/images/feature-write-bg.svg
deleted file mode 100644
index 32224066e..000000000
--- a/website/source/assets/images/feature-write-bg.svg
+++ /dev/null
@@ -1,18 +0,0 @@
-
diff --git a/website/source/assets/images/logo-hashicorp.svg b/website/source/assets/images/logo-hashicorp.svg
deleted file mode 100644
index e98f8b71f..000000000
--- a/website/source/assets/images/logo-hashicorp.svg
+++ /dev/null
@@ -1,7 +0,0 @@
-
diff --git a/website/source/assets/images/logo-text.svg b/website/source/assets/images/logo-text.svg
deleted file mode 100644
index 67c40535c..000000000
--- a/website/source/assets/images/logo-text.svg
+++ /dev/null
@@ -1,7 +0,0 @@
-
diff --git a/website/source/assets/images/news/webinar-Terraform-4-4-2017.png b/website/source/assets/images/news/webinar-Terraform-4-4-2017.png
deleted file mode 100644
index 8db9560c9..000000000
Binary files a/website/source/assets/images/news/webinar-Terraform-4-4-2017.png and /dev/null differ
diff --git a/website/source/assets/images/news/webinar-register-2000w.png b/website/source/assets/images/news/webinar-register-2000w.png
deleted file mode 100644
index 57a2e2b9e..000000000
Binary files a/website/source/assets/images/news/webinar-register-2000w.png and /dev/null differ
diff --git a/website/source/assets/images/news/webinar-register-585w.png b/website/source/assets/images/news/webinar-register-585w.png
deleted file mode 100644
index 1f23bd903..000000000
Binary files a/website/source/assets/images/news/webinar-register-585w.png and /dev/null differ
diff --git a/website/source/assets/images/og-image.png b/website/source/assets/images/og-image.png
deleted file mode 100644
index dbc7d61cc..000000000
Binary files a/website/source/assets/images/og-image.png and /dev/null differ
diff --git a/website/source/assets/images/terraform-enterprise-logo.svg b/website/source/assets/images/terraform-enterprise-logo.svg
deleted file mode 100644
index 7bbdd0055..000000000
--- a/website/source/assets/images/terraform-enterprise-logo.svg
+++ /dev/null
@@ -1,27 +0,0 @@
-
diff --git a/website/source/assets/javascripts/application.js b/website/source/assets/javascripts/application.js
deleted file mode 100644
index ad181b4cc..000000000
--- a/website/source/assets/javascripts/application.js
+++ /dev/null
@@ -1,5 +0,0 @@
-//= require turbolinks
-//= require jquery
-
-//= require hashicorp/mega-nav
-//= require hashicorp/sidebar
diff --git a/website/source/assets/stylesheets/_buttons.scss b/website/source/assets/stylesheets/_buttons.scss
deleted file mode 100755
index e1037e818..000000000
--- a/website/source/assets/stylesheets/_buttons.scss
+++ /dev/null
@@ -1,37 +0,0 @@
-.button {
- background: $button-background;
- border: 1px solid $button-font-color;
- box-shadow: 3px 4px 0 rgba(0,0,0,0.1);
- color: $button-font-color;
- display: inline-block;
- font-family: $button-font-family;
- font-size: $button-font-size;
- font-weight: $button-font-weight;
- letter-spacing: 1px;
- margin-bottom: 4px;
- padding: 10px 30px;
- text-transform: uppercase;
- text-decoration: none;
-
- &:hover,
- &:active,
- &:focus {
- text-decoration: none;
- }
-
- &:hover {
- background: $button-font-color;
- border: 1px solid $button-font-color;
- color: $button-background;
- }
-
- &.primary {
- background: $button-primary-background;
- border: 1px solid darken($button-primary-background, 5%);
- color: $button-primary-font-color;
-
- &:hover {
- background: lighten($button-primary-background, 5%);
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_community.scss b/website/source/assets/stylesheets/_community.scss
deleted file mode 100644
index 1ff047de6..000000000
--- a/website/source/assets/stylesheets/_community.scss
+++ /dev/null
@@ -1,22 +0,0 @@
-#inner {
- .people {
- margin-top: 30px;
-
- .person {
- &:after {
- display: block;
- clear: both;
- content: ' ';
- }
-
- img {
- width: 125px;
- margin: auto auto;
- }
-
- .bio {
- padding-left: 150px;
- }
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss
deleted file mode 100755
index 475aa46d8..000000000
--- a/website/source/assets/stylesheets/_docs.scss
+++ /dev/null
@@ -1,91 +0,0 @@
-#docs-sidebar {
- margin-bottom: 30px;
- margin-top: 50px;
- overflow: hidden;
-
- h1,
- h2,
- h3,
- h4,
- h5,
- h6 {
- margin-top: 30px;
- }
-
- ul.nav.docs-sidenav {
- display: block;
- padding-bottom: 15px;
-
- li {
- a {
- color: $sidebar-link-color;
- font-size: $sidebar-font-size;
- padding: 10px 0 10px 15px;
-
- &:before {
- color: $sidebar-link-color-active;
- content: '\203A';
- font-size: $font-size;
- left: 0;
- line-height: 100%;
- opacity: 0.4;
- position: absolute;
-
- height: 100%;
- width: 8px
- }
-
- &:focus,
- &:hover {
- background-color: transparent;
- color: $sidebar-link-color-hover;
-
- &:before {
- opacity: 1;
- }
- }
-
- &.back {
- &:before {
- content: '\2039';
- }
- }
- }
-
- // For forcing sub-navs to appear - in the long term, this should not
- // be a thing anymore...
- > ul.nav-visible {
- display: block;
- }
- }
-
- li.active {
- > a {
- color: $sidebar-link-color-active;
-
- &:before {
- opacity: 1;
- }
- }
-
- // Open nested navigations
- > ul.nav {
- display: block;
- }
- }
-
- // subnav
- ul.nav {
- display: none;
- margin: 10px;
-
- li {
- margin-left: 10px;
-
- a {
- padding: 6px 15px;
- }
- }
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_downloads.scss b/website/source/assets/stylesheets/_downloads.scss
deleted file mode 100644
index 97a4dfc66..000000000
--- a/website/source/assets/stylesheets/_downloads.scss
+++ /dev/null
@@ -1,60 +0,0 @@
-body.layout-downloads {
- #inner {
- .downloads {
- margin-top: 20px;
-
- .description {
- margin-bottom: 20px;
- }
-
- .download {
- align-items: center;
- border-bottom: 1px solid #b2b2b2;
- display: flex;
- padding: 15px;
-
- .details {
- padding-left: 20px;
-
- h2 {
- margin-top: 4px;
- border: none;
- }
-
- ul {
- padding-left: 0px;
- margin: -8px 0 0 0;
- }
-
- li {
- display: inline-block;
-
- &:after {
- content: " | ";
- }
-
- &:last-child:after {
- content: "";
- }
- }
- }
-
- .icon {
- svg {
- width: 75px;
- }
- }
-
- .os-name {
- font-size: 40px;
- margin-bottom: -3px;
- }
- }
-
- .poweredby {
- margin-top: 20px;
- text-align: center;
- }
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_footer.scss b/website/source/assets/stylesheets/_footer.scss
deleted file mode 100644
index ae34a057a..000000000
--- a/website/source/assets/stylesheets/_footer.scss
+++ /dev/null
@@ -1,24 +0,0 @@
-#footer {
- padding-top: 50px;
-
- ul.footer-links {
- li {
- a {
- color: $footer-link-color;
- font-size: $footer-font-size;
- font-family: $font-family-open-sans;
- text-decoration: none;
-
- &:hover, &:focus, &:active {
- background-color: transparent;
- color: $footer-link-color-hover;
- outline: 0;
- }
-
- @media (max-width: 992px) {
- text-align: center;
- }
- }
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_global.scss b/website/source/assets/stylesheets/_global.scss
deleted file mode 100755
index fc46858d6..000000000
--- a/website/source/assets/stylesheets/_global.scss
+++ /dev/null
@@ -1,43 +0,0 @@
-html {
- height: 100%;
- min-height: 100%;
- text-rendering: optimizeLegibility;
- -webkit-font-smoothing: antialiased;
-}
-
-body {
- -webkit-font-smoothing: antialiased;
- color: $body-font-color;
- background-color: $white;
- font-size: $font-size;
- font-family: $font-family-open-sans;
- font-weight: $font-weight-reg;
- height: 100%;
- min-height: 100%;
-}
-
-h1, h2, h3, h4, h5 {
- font-family: $font-family-klavika;
- -webkit-font-smoothing: antialiased;
-}
-
-h1 {
- margin-bottom: 24px;
-}
-
-// Avoid FOUT
-.wf-loading {
- visibility: hidden;
-}
-
-.wf-active, .wf-inactive {
- visibility: visible;
-}
-
-@media (min-width: $screen-sm) and (max-width: $screen-md) {
- .container {
- padding: 0;
- max-width: $screen-md;
- min-width: $screen-sm;
- }
-}
diff --git a/website/source/assets/stylesheets/_header.scss b/website/source/assets/stylesheets/_header.scss
deleted file mode 100755
index dde70d7d6..000000000
--- a/website/source/assets/stylesheets/_header.scss
+++ /dev/null
@@ -1,78 +0,0 @@
-#header {
- background: $header-background-color;
-
- .navbar-toggle {
- height: $header-height;
- margin: 0;
- padding-right: 15px;
- border-radius: 0;
-
- .icon-bar {
- border: 1px solid $white;
- border-radius: 0;
- }
- }
-
- .navbar-brand {
- display: block;
- margin: 0;
- padding: 0;
-
- a {
- display: flex;
- align-items: center;
- height: $header-height;
- line-height: $header-height;
-
- svg.logo {
- transition: opacity 0.15s ease-in-out;
- @extend svg.logo.white;
-
- &:hover, &:focus, &:active {
- opacity: 0.6;
- outline: 0;
- text-decoration: none;
- }
- }
- }
- }
-
- ul.nav {
- li {
- a {
- color: $header-link-color;
- font-size: $header-font-size;
- font-family: $font-family-open-sans;
- font-weight: $font-weight-bold;
- height: $header-height;
- line-height: $header-height;
- padding: 0 10px;
- margin: 0;
- text-decoration: none;
-
- &:hover, &:focus, &:active {
- background-color: transparent;
- color: $header-link-color-hover;
- outline: 0;
-
- svg {
- fill: $header-link-color-hover;
- }
- }
-
- svg {
- fill: $header-link-color;
- position: relative;
- top: 2px;
- width: 14px;
- height: 14px;
- margin-right: 3px;
- }
- }
- }
- }
-
- .buttons {
- margin-top: 2px;
- }
-}
diff --git a/website/source/assets/stylesheets/_home.scss b/website/source/assets/stylesheets/_home.scss
deleted file mode 100644
index 442638906..000000000
--- a/website/source/assets/stylesheets/_home.scss
+++ /dev/null
@@ -1,338 +0,0 @@
-#page-home {
- // Override the main header
- #header {
- background: $home-header-background-color;
-
- .navbar-toggle {
- .icon-bar {
- border: 1px solid $home-header-link-color;
- }
- }
-
- .navbar-brand {
- a {
- svg.logo {
- @extend svg.logo.color;
- }
- }
- }
-
- ul.nav {
- li {
- a {
- color: $home-header-link-color;
-
- &:hover, &:focus, &:active {
- background-color: transparent;
- color: $home-header-link-color-hover;
-
- svg {
- fill: $home-header-link-color-hover;
- }
- }
-
- svg {
- fill: $home-header-link-color;
- }
- }
- }
- }
- }
-
- header {
- .hero {
- margin: 140px auto 160px auto;
- text-align: center;
-
- .button {
- margin: 5px;
-
- @media (max-width: 768px) {
- display: block;
- margin-top: 10px;
- text-align: center;
- }
- }
-
- svg {
- max-width: 90%;
- }
- }
- }
-
- section {
- background: $white;
- padding: 100px 0;
- }
-
- section.marketing {
- h2 {
- font-family: $font-family-klavika;
- font-size: 36px;
- font-weight: $font-weight-bold;
- line-height: 1.25;
- letter-spacing: -0.02em;
- margin: 20px 0 10px 0;
- padding: 0;
- text-transform: uppercase;
- }
-
- h3 {
- color: $black;
- font-size: 20px;
- font-weight: $font-weight-bold;
- line-height: 1.2;
- margin: 50px 0 15px 0;
- text-transform: uppercase;
- }
-
- p {
- font-family: $font-family-open-sans;
- font-size: 16px;
- letter-spacing: 0.01em;
- line-height: 1.5;
- margin: 0 0 10px;
- }
-
- p.lead {
- font-size: 20px;
- margin: 15px 0 30px 0;
- }
-
- span.callout {
- background: $black;
- color: $white;
- display: inline-block;
- font-family: $font-family-klavika;
- font-size: 18px;
- font-weight: $font-weight-bold;
- line-height: 1;
- margin: 0;
- padding: 5px;
- letter-spacing: 0.05em;
- text-transform: uppercase;
- }
-
- &.purple {
- background: $terraform-purple;
-
- h2 {
- color: $white;
- }
-
- p {
- color: $white;
- }
-
- span.callout {
- background: $white;
- color: $terraform-purple;
- }
-
- .button {
- border: none;
- }
- }
-
- &.black {
- background: $black;
-
- h2 {
- color: $white;
- }
-
- p {
- color: $white;
- }
-
- span.callout {
- background: $white;
- color: $black;
- }
-
- .button {
- border: none;
- }
- }
-
- features {
- .feature-card {
- border: 1px solid $gray-darker;
- color: $gray-darker;
- display: block;
- height: 200px;
- font-weight: $font-weight-bold;
- padding: 20px;
- margin-bottom: 15px;
- text-transform: uppercase;
- transition: all 0.1s ease-in-out;
-
- p {
- line-height: 1.25em;
- }
-
- span {
- color: $terraform-purple;
- display: block;
- font-weight: $font-weight-bold;
- margin-bottom: 10px;
- }
-
- svg {
- display: block;
- margin-top: 20px;
- max-width: 100%;
-
- path {
- transition: all 0.1s ease-in-out;
- fill: $gray-dark;
- }
- }
-
- &:hover, &:active, &:focus {
- border: 1px solid $terraform-purple;
- color: $terraform-purple;
- text-decoration: none;
-
- svg {
- path {
- fill: $terraform-purple;
- transition: all 0.1s ease-in-out;
- }
- }
- }
- }
- }
-
- plan {
- h3 {
- color: $white;
- }
- }
-
- news {
- img, iframe {
- box-shadow: 5px 5px 0 rgba(0, 0, 0, 0.25);
- margin: 0 0 20px 0;
- max-width: 100%;
- }
-
- iframe {
- @media (min-width: $screen-sm) and (max-width: $screen-md) {
- width: 100vw;
- height: 56.25vw;
- }
- }
-
- h2 {
- margin-bottom: 50px;
- }
-
- h3 {
- color: $white;
- margin-top: 0;
- text-transform: none;
- }
-
- div.latest-item {
- + .latest-item {
- border-top: 1px solid rgba(white, .5);
- margin-top: 20px;
- padding-top: 20px;
- }
-
- @media (max-width: $screen-sm-max) {
- &:last-child {
- border-bottom: 1px solid rgba(white, .5);
- margin-bottom: 20px;
- padding-bottom: 20px;
- }
- }
-
- p {
- padding: 0 0 10px 0;
- }
-
- .button {
- &:hover {
- background: $white;
- color: $terraform-purple;
- }
- }
- }
- }
-
- examples {
- background: #EDEDED;
-
- h1, h2, h3, h4, h5, h6, p {
- color: $black;
- }
- }
-
- enterprise {
- background-image: image-url('enterprise-callout-bg.svg');
- background-position: right bottom;
- background-repeat: no-repeat;
- background-size: 80%;
- }
- }
-
- .terminal {
- border: 1px solid $white;
- background-color: $black;
- box-sizing: border-box;
- color: $white;
- font-family: $font-family-monospace;
- font-size: 16px;
- line-height: 1.8;
- margin: 20px auto;
- padding: 10px 20px 20px 20px;
-
- .terminal-content {
- margin-top: 15px;
- overflow-x: scroll;
- width: 100%;
- white-space: nowrap;
-
- span {
- display: block;
- white-space: pre;
-
- span {
- display: inline;
- }
-
- &.text-pink {
- color: lighten($consul-pink, 20%);
- }
- }
- }
-
- span.circle {
- &:before {
- content: '\25CF';
- color: $white;
- font-size: 20px;
- line-height: 100%;
- height: 100%;
- }
- }
-
- span.txt-spe {
- color: #7190EA;
- }
-
- span.txt-str {
- color: #64E86C;
- }
-
- span.txt-int {
- color: #E89264;
- }
-
- span.txt-var {
- color: #9A56f9;
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_inner.scss b/website/source/assets/stylesheets/_inner.scss
deleted file mode 100644
index 1fd41e3f3..000000000
--- a/website/source/assets/stylesheets/_inner.scss
+++ /dev/null
@@ -1,95 +0,0 @@
-#inner {
- p, li, .alert {
- font-size: $font-size;
- font-family: $font-family-open-sans;
- font-weight: $font-weight-reg;
- line-height: 1.84em;
- margin: 0 0 $font-size;
- -webkit-font-smoothing: antialiased;
- }
-
- .alert p:last-child {
- margin-bottom: 0;
- }
-
- pre,
- code,
- pre code,
- tt {
- font-family: $font-family-monospace;
- font-size: $font-size - 2;
- line-height: 1.6;
- }
-
- pre {
- padding: 20px;
- margin: 0 0 $font-size;
-
- // This will force the code to scroll horizontally on small screens
- // instead of wrapping.
- code {
- overflow-wrap: normal;
- white-space: pre;
- }
- }
-
- a {
- color: $body-link-color;
- text-decoration: none;
-
- &:hover {
- text-decoration: underline;
- }
-
- code {
- background: inherit;
- color: $body-link-color;
- }
- }
-
- img {
- display: block;
- margin: 25px auto;
- max-width: 650px;
- height: auto;
- width: 90%;
- }
-
- h1,
- h2,
- h3,
- h4 {
- color: $body-font-color;
- margin-top: 54px;
- margin-bottom: $font-size;
- line-height: 1.3;
- }
-
- h2 {
- padding-bottom: 3px;
- border-bottom: 1px solid $gray-light;
- }
-
- h1 > code,
- h2 > code,
- h3 > code,
- h4 > code,
- h5 > code
- h6 > code,
- li code,
- table code,
- p code,
- tt,
- .alert code {
- font-family: $font-family-monospace;
- font-size: 90%;
- background-color: transparent;
- color: inherit;
- padding: 0;
- }
-
- table {
- @extend .table;
- @extend .table-striped;
- }
-}
diff --git a/website/source/assets/stylesheets/_logos.scss b/website/source/assets/stylesheets/_logos.scss
deleted file mode 100644
index 6e8f39a48..000000000
--- a/website/source/assets/stylesheets/_logos.scss
+++ /dev/null
@@ -1,41 +0,0 @@
-svg.logo {
- &.color {
- opacity: 1.0;
-
- path.text {
- fill: $black;
- opacity: 1.0;
- }
-
- path.rect-light {
- fill: $terraform-purple;
- opacity: 1.0;
- }
-
- path.rect-dark {
- fill: $terraform-purple-dark;
- opacity: 1.0;
- }
- }
-
- // The default logo class is the colored version
- @extend .color;
-
- &.white {
- opacity: 1.0;
-
- path.text {
- fill: $white;
- }
-
- path.rect-light {
- fill: $white;
- opacity: 1.0;
- }
-
- path.rect-dark {
- fill: $white;
- opacity: 0.7;
- }
- }
-}
diff --git a/website/source/assets/stylesheets/_syntax.scss.erb b/website/source/assets/stylesheets/_syntax.scss.erb
deleted file mode 100644
index b7ddc9e5b..000000000
--- a/website/source/assets/stylesheets/_syntax.scss.erb
+++ /dev/null
@@ -1,14 +0,0 @@
-pre.highlight code {
- color: #333333;
-}
-
-<%= Rouge::Themes::Github.render(scope: ".highlight") %>
-
-pre.highlight {
- border: 1px solid #CCCCCC;
-}
-
-pre.highlight code span.c1 {
- font-style: normal;
- opacity: 0.8;
-}
diff --git a/website/source/assets/stylesheets/_variables.scss b/website/source/assets/stylesheets/_variables.scss
deleted file mode 100755
index 20473f940..000000000
--- a/website/source/assets/stylesheets/_variables.scss
+++ /dev/null
@@ -1,63 +0,0 @@
-// Colors
-$white: #FFFFFF;
-$black: #000000;
-$gray-darker: #555555;
-
-$consul-pink: #D62783;
-$consul-pink-dark: #961D59;
-$packer-blue: #1DAEFF;
-$packer-blue-dark: #1D94DD;
-$terraform-purple: #5C4EE5;
-$terraform-purple-dark: #4040B2;
-$vagrant-blue: #1563FF;
-$vagrant-blue-dark: #104EB2;
-$vault-black: #000000;
-$vault-blue: #00ABE0;
-$vault-gray: #919FA8;
-
-// Typography
-$font-family-klavika: 'klavika-web', Helvetica, sans-serif;
-$font-family-open-sans: 'Open Sans', sans-serif;
-$font-family-monospace: 'Fira Mono', monospace;
-$font-size: 15px;
-$font-weight-reg: 400;
-$font-weight-bold: 600;
-
-// Body
-$body-font-color: $gray-darker;
-$body-link-color: $terraform-purple;
-
-// Home
-$home-header-background-color: transparent;
-$home-header-link-color: $gray-darker;
-$home-header-link-color-hover: $black;
-
-// Sidebar
-$sidebar-background-color: $white;
-$sidebar-font-size: $font-size - 2;
-$sidebar-link-color: $body-font-color;
-$sidebar-link-color-hover: $black;
-$sidebar-link-color-active: $body-link-color;
-$sidebar-font-family: $font-family-open-sans;
-$sidebar-font-weight: $font-weight-reg;
-
-// Header
-$header-background-color: $terraform-purple;
-$header-font-size: $font-size - 2;
-$header-height: 92px;
-$header-link-color: rgba($white, 0.85);
-$header-link-color-hover: $white;
-
-// Footer
-$footer-font-size: $font-size - 2;
-$footer-link-color: $body-font-color;
-$footer-link-color-hover: $black;
-
-// Button
-$button-background: $white;
-$button-font-color: $terraform-purple;
-$button-font-family: $font-family-klavika;
-$button-font-size: $font-size;
-$button-font-weight: $font-weight-bold;
-$button-primary-background: $terraform-purple;
-$button-primary-font-color: $white;
diff --git a/website/source/assets/stylesheets/application.scss b/website/source/assets/stylesheets/application.scss
deleted file mode 100755
index e08595d39..000000000
--- a/website/source/assets/stylesheets/application.scss
+++ /dev/null
@@ -1,33 +0,0 @@
-@import 'bootstrap-sprockets';
-@import 'bootstrap';
-
-@import url('https://fonts.googleapis.com/css?family=Fira+Mono|Open+Sans:400,600');
-
-// Mega Nav
-@import 'hashicorp/mega-nav';
-
-// Anchor links
-@import 'hashicorp/anchor-links';
-
-// Core variables and mixins
-@import '_variables';
-
-// Sidebar
-@import 'hashicorp/sidebar';
-
-//Global Site
-@import '_global';
-
-// Components
-@import '_header';
-@import '_footer';
-@import '_inner';
-@import '_buttons';
-@import '_syntax';
-@import '_logos';
-
-// Pages
-@import '_community';
-@import '_docs';
-@import '_downloads';
-@import '_home';
diff --git a/website/source/community.html.erb b/website/source/community.html.erb
deleted file mode 100644
index 6f5c19b25..000000000
--- a/website/source/community.html.erb
+++ /dev/null
@@ -1,143 +0,0 @@
----
-layout: "inner"
-page_title: "Community"
-description: |-
- Terraform is a new project with a growing community. Despite this, there are active, dedicated users willing to help you through various mediums.
----
-
-
Community
-
-
- Terraform is a new project with a growing community. Despite this,
- there are active, dedicated users willing to help you through various
- mediums.
-
-
- Stack Exchange: Terraform questions often get asked and
- answered on
- Server Fault and
- Stack Overflow. Use the tag
- "terraform" to help your question be found by Terraform experts, and please
- be respectful of the "How to Ask" guidelines in each community.
-
- Bug Tracker:Issue tracker on GitHub. Please only use this for reporting bugs. Do not ask for general help here; use a Stack Exchange community, Gitter chat, or the mailing list for that.
-
-
- Training: Paid HashiCorp training courses are also available in a city near you. Private training courses are also available.
-
-
-
People
-
- The following people are some of the faces behind Terraform. They each
- contribute to Terraform in some core way. Over time, faces may appear and
- disappear from this list as contributors come and go.
-
- Mitchell Hashimoto is the creator of Terraform and works on all
- layers of Terraform from the core to providers. In addition to Terraform,
- Mitchell is the creator of
- Vagrant,
- Packer, and
- Consul.
-
- Paul Hinze is the Project Lead of Terraform. He helps organize the team
- of HashiCorp employees and community members that work on Terraform
- day-to-day. He works on Terraform's core and providers.
-
- Clint Shryock is a HashiCorp Engineer working on Terraform. He is the
- primary maintainer of the AWS provider, and works across all providers.
- Clint is also the primary author of the Fastly provider.
-
- Radek Simko is a HashiCorp Engineer working on Terraform. His focus
- is the provider land (AWS mainly), but you can find him working
- on some core features from time to time as well.
- Radek is also the primary author of the Kubernetes provider.
-
- Martin Atkins is a community contributor turned HashiCorp Engineer working
- on Terraform with a focus on the core.
-
-
-
-
-
-
diff --git a/website/source/docs/enterprise/api/configurations.html.md b/website/source/docs/enterprise/api/configurations.html.md
deleted file mode 100755
index 884a0d2b9..000000000
--- a/website/source/docs/enterprise/api/configurations.html.md
+++ /dev/null
@@ -1,174 +0,0 @@
----
-layout: "enterprise"
-page_title: "Configurations - API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api-configurations"
-description: |-
- A configuration represents settings associated with a resource that runs
- Terraform with versions of Terraform configuration.
----
-
-# Configuration API
-
-A configuration version represents versions of Terraform configuration. Each set
-of changes to Terraform HCL files or the scripts used in the files should have
-an associated configuration version.
-
-When creating versions via the API, the variables attribute can be sent to
-include the necessary variables for the Terraform configuration. A configuration
-represents settings associated with a resource that runs Terraform with versions
-of Terraform configuration. Configurations have many configuration versions
-which represent versions of Terraform configuration templates and other
-associated configuration. Most operations take place on the configuration
-version, not the configuration.
-
-## Get Latest Configuration Version
-
-This endpoint gets the latest configuration version.
-
-| Method | Path |
-| :----- | :------------- |
-| `GET` | `/terraform/configurations/:username/:name/versions/latest` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username or organization
- name under which to get the latest configuration version. This username must
- already exist in the system, and the user must have permission to create new
- configuration versions under this namespace. This is specified as part of the
- URL.
-
-- `:name` `(string: )` - Specifies the name of the configuration for
- which to get the latest configuration. This is specified as part of the URL.
-
-### Sample Request
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions/latest
-```
-
-### Sample Response
-
-```json
-{
- "version": {
- "version": 6,
- "metadata": {
- "foo": "bar"
- },
- "tf_vars": [],
- "variables": {}
- }
-}
-```
-
-- `version` `(int)` - the unique version instance number.
-
-- `metadata` `(map)` - a map of arbitrary metadata for this
- version.
-
-## Create Configuration Version
-
-This endpoint creates a new configuration version.
-
-| Method | Path |
-| :----- | :------------- |
-| `POST` | `/terraform/configurations/:username/:name/versions` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username or organization
- name under which to create this configuration version. This username must
- already exist in the system, and the user must have permission to create new
- configuration versions under this namespace. This is specified as part of the
- URL.
-
-- `:name` `(string: )` - Specifies the name of the configuration for
- which to create a new version. This is specified as part of the URL.
-
-- `metadata` `(map)` - Specifies an arbitrary hash of key-value
- metadata pairs. This is specified as the payload as JSON.
-
-- `variables` `(map)` - Specifies a hash of key-value pairs that
- will be made available as variables to this version.
-
-### Sample Payload
-
-```json
-{
- "version": {
- "metadata": {
- "git_branch": "master",
- "remote_type": "atlas",
- "remote_slug": "hashicorp/atlas"
- },
- "variables": {
- "ami_id": "ami-123456",
- "target_region": "us-east-1",
- "consul_count": "5",
- "consul_ami": "ami-123456"
- }
- }
-}
-```
-
-### Sample Request
-
-```text
-$ curl \
- --request POST \
- --header "X-Atlas-Token: ..." \
- --header "Content-Type: application/json" \
- --data @payload.json \
- https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions
-```
-
-### Sample Response
-
-```json
-{
- "version": 6,
- "upload_path": "https://binstore.hashicorp.com/ddbd7db6-f96c-4633-beb6-22fe2d74eeed",
- "token": "ddbd7db6-f96c-4633-beb6-22fe2d74eeed"
-}
-```
-
-- `version` `(int)` - the unique version instance number. This is
- auto-incrementing.
-
-- `upload_path` `(string)` - the path where the archive should be uploaded via a
- `POST` request.
-
-- `token` `(string)` - the token that should be used when uploading the archive
- to the `upload_path`.
-
-## Check Upload Progress
-
-This endpoint retrieves the progress for an upload of a configuration version.
-
-| Method | Path |
-| :----- | :------------- |
-| `GET` | `/terraform/configurations/:username/:name/versions/progress/:token` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username or organization to
- read progress. This is specified as part of the URL.
-
-- `:name` `(string: )` - Specifies the name of the configuration for
- to read progress. This is specified as part of the URL.
-
-- `:token` `(string: )` - Specifies the token that was returned from
- the create option. **This is not an Atlas Token!** This is specified as part
- of the URL.
-
-### Sample Request
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- https://atlas.hashicorp.com/api/v1/terraform/configurations/my-organization/my-configuration/versions/progress/ddbd7db6-f96c-4633-beb6-22fe2d74eeed
-```
-
-### Sample Response
diff --git a/website/source/docs/enterprise/api/environments.html.md b/website/source/docs/enterprise/api/environments.html.md
deleted file mode 100755
index e92027c48..000000000
--- a/website/source/docs/enterprise/api/environments.html.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "enterprise"
-page_title: "Environments - API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api-environments"
-description: |-
- Environments represent running infrastructure managed by Terraform.
----
-
-# Environments API
-
-Environments represent running infrastructure managed by Terraform.
-
-Environments can also be connected to Consul clusters. This documentation covers
-the environment interactions with Terraform.
-
-## Get Latest Configuration Version
-
-This endpoint updates the Terraform variables for an environment. Due to the
-sensitive nature of variables, they are not returned on success.
-
-| Method | Path |
-| :----- | :------------- |
-| `PUT` | `/environments/:username/:name/variables` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username or organization
- name under which to update variables. This username must already exist in the
- system, and the user must have permission to create new configuration versions
- under this namespace. This is specified as part of the URL.
-
-- `:name` `(string: )` - Specifies the name of the environment for
- which to update variables. This is specified as part of the URL.
-
-- `variables` `(map)` - Specifies a key-value map of Terraform
- variables to be updated. Existing variables will only be removed when their
- value is empty. Variables of the same key will be overwritten.
-
- -> Note: Only string variables can be updated via the API currently. Creating or updating HCL variables is not yet supported.
-
-### Sample Payload
-
-```json
-{
- "variables": {
- "desired_capacity": "15",
- "foo": "bar"
- }
-}
-```
-
-### Sample Request
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- --header "Content-Type: application/json" \
- --request PUT \
- --data @payload.json \
- https://atlas.hashicorp.com/api/v1/environments/my-organization/my-environment/variables
-```
-
-### Sample Response
-
-
-```text
-```
-
-(empty body)
diff --git a/website/source/docs/enterprise/api/index.html.md b/website/source/docs/enterprise/api/index.html.md
deleted file mode 100755
index 69fded303..000000000
--- a/website/source/docs/enterprise/api/index.html.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "enterprise"
-page_title: "API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api"
-description: |-
- Terraform Enterprise provides an API for a **subset of features**.
----
-
-# Terraform Enterprise API Documentation
-
-Terraform Enterprise provides an API for a **subset of features** available. For
-questions or requests for new API features please email
-[support@hashicorp.com](mailto:support@hashicorp.com).
-
-The list of available endpoints are on the navigation.
-
-## Authentication
-
-All requests must be authenticated with an `X-Atlas-Token` HTTP header. This
-token can be generated or revoked on the account tokens page. Your token will
-have access to all resources your account has access to.
-
-For organization level resources, we recommend creating a separate user account
-that can be added to the organization with the specific privilege level
-required.
-
-## Response Codes
-
-Standard HTTP response codes are returned. `404 Not Found` codes are returned
-for all resources that a user does not have access to, as well as for resources
-that don't exist. This is done to avoid a potential attacker discovering the
-existence of a resource.
-
-## Errors
-
-Errors are returned in JSON format:
-
-```json
-{
- "errors": {
- "name": [
- "has already been taken"
- ]
- }
-}
-```
-
-## Versioning
-
-The API currently resides under the `/v1` prefix. Future APIs will increment
-this version leaving the `/v1` API intact, though in the future certain features
-may be deprecated. In that case, ample notice to migrate to the new API will be
-provided.
-
-## Content Type
-
-The API accepts namespaced attributes in either JSON or
-`application/x-www-form-urlencoded`. We recommend using JSON, but for simplicity
-form style requests are supported.
diff --git a/website/source/docs/enterprise/api/runs.html.md b/website/source/docs/enterprise/api/runs.html.md
deleted file mode 100755
index 8f2d41d3b..000000000
--- a/website/source/docs/enterprise/api/runs.html.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "enterprise"
-page_title: "Runs - API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api-runs"
-description: |-
- Runs in Terraform Enterprise represents a two step Terraform plan and a subsequent apply.
----
-
-# Runs API
-
-Runs in Terraform Enterprise represents a two step Terraform plan and a
-subsequent apply.
-
-Runs are queued under [environments](/docs/enterprise/api/environments.html)
-and require a two-step confirmation workflow. However, environments
-can be configured to auto-apply to avoid this.
-
-## Queue Run
-
-Starts a new run (plan) in the environment. Requires a configuration version to
-be present on the environment to succeed, but will otherwise 404.
-
-| Method | Path |
-| :----- | :------------- |
-| `POST` | `/environments/:username/:name/plan` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username or organization
- name under which to get the latest configuration version. This username must
- already exist in the system, and the user must have permission to create new
- configuration versions under this namespace. This is specified as part of the
- URL.
-
-- `:name` `(string: )` - Specifies the name of the configuration for
- which to get the latest configuration. This is specified as part of the URL.
-
-- `destroy` `(bool: false)` - Specifies if the plan should be a destroy plan.
-
-### Sample Payload
-
-```json
-{
- "destroy": false
-}
-```
-
-### Sample Request
-
-```text
-$ curl \
- --request POST \
- --header "X-Atlas-Token: ..." \
- --header "Content-Type: application/json" \
- --data @payload.json \
- https://atlas.hashicorp.com/api/v1/environments/my-organization/my-environment/plan
-```
-
-### Sample Response
-
-```json
-{
- "success": true
-}
-```
diff --git a/website/source/docs/enterprise/api/states.html.md b/website/source/docs/enterprise/api/states.html.md
deleted file mode 100755
index 71ad109e0..000000000
--- a/website/source/docs/enterprise/api/states.html.md
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "enterprise"
-page_title: "State - API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api-states"
-description: |-
- State represents the status of your infrastructure at the last time Terraform was run.
----
-
-# State API
-
-State represents the status of your infrastructure at the last time Terraform
-was run. States can be pushed to Terraform Enterprise from Terraform's CLI after
-an apply is done locally, or state is automatically stored if the apply is done
-in Terraform Enterprise.
-
-## List of States
-
-This endpoint gets a list of states accessible to the user corresponding to the
-provided token.
-
-| Method | Path |
-| :----- | :------------- |
-| `GET` | `/terraform/state` |
-
-### Parameters
-
-- `?username` `(string: "")` - Specifies the organization/username to filter
- states
-
-- `?page` `(int: 1)` - Specifies the pagination, which defaults to page 1.
-
-### Sample Requests
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- https://atlas.hashicorp.com/api/v1/terraform/state
-```
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- https://atlas.hashicorp.com/api/v1/terraform/state?username=acme
-```
-
-### Sample Response
-
-```json
-{
- "states": [
- {
- "updated_at": "2017-02-03T19:52:37.693Z",
- "environment": {
- "username": "my-organization",
- "name": "docs-demo-one"
- }
- },
- {
- "updated_at": "2017-04-06T15:48:49.677Z",
- "environment": {
- "username": "my-organization",
- "name": "docs-demo-two"
- }
- }
- ]
-}
-```
diff --git a/website/source/docs/enterprise/api/users.html.md b/website/source/docs/enterprise/api/users.html.md
deleted file mode 100755
index aca40df05..000000000
--- a/website/source/docs/enterprise/api/users.html.md
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "enterprise"
-page_title: "Users - API - Terraform Enterprise"
-sidebar_current: "docs-enterprise-api-users"
-description: |-
- Users are both users and organizations in Terraform Enterprise. They are the parent resource of all resources.
----
-
-# Users API
-
-Users are both users and organizations in Terraform Enterprise. They are the
-parent resource of all resources.
-
-Currently, only the retrieval of users is available on the API. Additionally,
-only Vagrant box resources will be listed. Boxes will be returned based on
-permissions over the organization, or user.
-
-## Read User
-
-This endpoint retrieves information about a single user.
-
-| Method | Path |
-| :----- | :------------- |
-| `GET` | `/user/:username` |
-
-### Parameters
-
-- `:username` `(string: )` - Specifies the username to search. This is
- specified as part of the URL.
-
-### Sample Request
-
-```text
-$ curl \
- --header "X-Atlas-Token: ..." \
- https://atlas.hashicorp.com/api/v1/user/my-user
-```
-
-### Sample Response
-
-```json
-{
- "username": "sally-seashell",
- "avatar_url": "https://www.gravatar.com/avatar/...",
- "profile_html": "Sally is...",
- "profile_markdown": "Sally is...",
- "boxes": []
-}
-```
diff --git a/website/source/docs/enterprise/artifacts/artifact-provider.html.md b/website/source/docs/enterprise/artifacts/artifact-provider.html.md
deleted file mode 100755
index 41b5a8de1..000000000
--- a/website/source/docs/enterprise/artifacts/artifact-provider.html.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "enterprise"
-page_title: "Provider - Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-artifacts-provider"
-description: |-
- Terraform has a provider for managing artifacts called `atlas_artifact`.
----
-
-# Artifact Provider
-
-Terraform has a [provider](https://terraform.io/docs/providers/index.html) for managing Terraform Enterprise artifacts called `atlas_artifact`.
-
-This is used to make data stored in Artifacts available to Terraform for
-interpolation. In the following example, an artifact is defined and references
-an AMI ID stored in Terraform Enterprise.
-
-~> **Why is this called "atlas"?** Atlas was previously a commercial offering
-from HashiCorp that included a full suite of enterprise products. The products
-have since been broken apart into their individual products, like **Terraform
-Enterprise**. While this transition is in progress, you may see references to
-"atlas" in the documentation. We apologize for the inconvenience.
-
-```hcl
-provider "atlas" {
- # You can also set the atlas token by exporting ATLAS_TOKEN into your env
- token = "${var.atlas_token}"
-}
-
-data "atlas_artifact" "web-worker" {
- name = "my-username/web-worker"
- type = "amazon.image"
- version = "latest"
-}
-
-resource "aws_instance" "worker-machine" {
- ami = "${atlas_artifact.web-worker.metadata_full.region-us-east-1}"
- instance_type = "m1.small"
-}
-```
-
-This automatically pulls the "latest" artifact version.
-
-Following a new artifact version being created via a Packer build, the following
-diff would be generated when running `terraform plan`.
-
-```
--/+ aws_instance.worker-machine
- ami: "ami-168f9d7e" => "ami-2f3a9df2" (forces new resource)
- instance_type: "m1.small" => "m1.small"
-```
-
-This allows you to reference changing artifacts and trigger new deployments upon
-pushing subsequent Packer builds.
-
-Read more about artifacts in the [Terraform documentation](https://terraform.io/docs/providers/terraform-enterprise/r/artifact.html).
diff --git a/website/source/docs/enterprise/artifacts/creating-amis.html.md b/website/source/docs/enterprise/artifacts/creating-amis.html.md
deleted file mode 100755
index 1469c65a7..000000000
--- a/website/source/docs/enterprise/artifacts/creating-amis.html.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: "enterprise"
-page_title: "Creating AMIs - Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-artifacts-amis"
-description: |-
- Creating AMI Artifacts with Packer.
----
-
-
-# Creating AMI Artifacts with Packer and Terraform Enterprise
-
-Currently, the best way to create AWS AMI artifacts is with Packer.
-
-We detail how to do this in the [Packer section of the documentation](/docs/enterprise/packer/artifacts/creating-amis.html).
diff --git a/website/source/docs/enterprise/artifacts/index.html.md b/website/source/docs/enterprise/artifacts/index.html.md
deleted file mode 100755
index 4655f1eb0..000000000
--- a/website/source/docs/enterprise/artifacts/index.html.md
+++ /dev/null
@@ -1,21 +0,0 @@
----
-layout: "enterprise"
-page_title: "Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-artifacts"
-description: |-
- Terraform Enterprise can be used to store artifacts for use by Terraform. Typically, artifacts are stored with Packer.
----
-
-# About Terraform Artifacts
-
-Terraform Enterprise can be used to store artifacts for use by Terraform.
-Typically, artifacts are [stored with Packer](https://packer.io/docs).
-
-Artifacts can be used in to deploy and manage images
-of configuration. Artifacts are generic, but can be of varying types
-like `amazon.image`. See the Packer [`artifact_type`](https://packer.io/docs/post-processors/atlas.html#artifact_type)
-docs for more information.
-
-Packer can create artifacts both while running in and out of Terraform
-Enterprise network. This is possible due to the post-processors use of the
-public artifact API to store the artifacts.
diff --git a/website/source/docs/enterprise/artifacts/managing-versions.html.md b/website/source/docs/enterprise/artifacts/managing-versions.html.md
deleted file mode 100755
index 50af422ae..000000000
--- a/website/source/docs/enterprise/artifacts/managing-versions.html.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "enterprise"
-page_title: "Managing Versions - Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-artifacts-versions"
-description: |-
- Artifacts are versioned and assigned a version number, here is how to manage the versions.
----
-
-# Managing Artifact Versions
-
-Artifacts stored in Terraform Enterprise are versioned and assigned a version
-number. Versions are useful to roll back, audit and deploy specific
-versions of images to certain environments in a targeted way.
-
-This assumes you are familiar with the [artifact provider](https://terraform.io/docs/providers/terraform-enterprise/index.html)
-in Terraform.
-
-### Finding the Version of an Artifact
-
-Artifact versions can be found with the [`terraform show` command](https://terraform.io/docs/commands/show.html),
-or by looking at the Packer logs generated during builds. After a
-successful artifact upload, version numbers are displayed. "latest" can
-be used to use the latest version of the artifact.
-
-The following output is from `terraform show`.
-
-```text
-atlas_artifact.web-worker:
- id = us-east-1:ami-3a0a1d52
- build = latest
- metadata_full.# = 1
- metadata_full.region-us-east-1 = ami-3a0a1d52
- name = my-username/web-worker
- slug = my-username/web-worker/amazon.image/7
- type = amazon.image
-```
-
-In this case, the version is 7 and can be found in the persisted slug
-attribute.
-
-### Pinning Artifacts to Specific Versions
-
-You can pin artifacts to a specific version. This allows for a targeted
-deploy.
-
-```hcl
-data "atlas_artifact" "web-worker" {
- name = "my-username/web-worker"
- type = "amazon.image"
- version = 7
-}
-```
-
-This will use version 7 of the `web-worker` artifact.
-
-### Pinning Artifacts to Specific Builds
-
-Artifacts can also be pinned to a Terraform build number. This is only
-possible if Terraform Enterprise was used to build the artifact with Packer.
-
-```hcl
-data "atlas_artifact" "web-worker" {
- name = "my-username/web-worker"
- type = "amazon.image"
- build = 5
-}
-```
-
-It's recommended to use versions, instead of builds, as it will be easier to
-track when building outside of the Terraform Enterprise environment.
diff --git a/website/source/docs/enterprise/faq/index.html.md b/website/source/docs/enterprise/faq/index.html.md
deleted file mode 100755
index c99a37afc..000000000
--- a/website/source/docs/enterprise/faq/index.html.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: "enterprise"
-page_title: "FAQ - Terraform Enterprise"
-sidebar_current: "docs-enterprise-faq"
-description: |-
- Frequently Asked Questions.
----
-
-# Frequently Asked Questions
-
-[Monolithic Artifacts](/docs/enterprise/faq/monolithic-artifacts.html) - *How do I build multiple applications into one artifact?*
-
-[Rolling Deployments](/docs/enterprise/faq/rolling-deployments.html) - *How do I configure rolling deployments?*
-
-[Vagrant Cloud Migration](/docs/enterprise/faq/vagrant-cloud-migration.html) - *How can I prepare for the Vagrant Cloud Migration?*
diff --git a/website/source/docs/enterprise/faq/monolithic-artifacts.html.md b/website/source/docs/enterprise/faq/monolithic-artifacts.html.md
deleted file mode 100755
index e44f12331..000000000
--- a/website/source/docs/enterprise/faq/monolithic-artifacts.html.md
+++ /dev/null
@@ -1,159 +0,0 @@
----
-layout: "enterprise"
-page_title: "Monolithic Artifacts - FAQ - Terraform Enterprise"
-sidebar_current: "docs-enterprise-faq-monolithic"
-description: |-
- How do I build multiple applications into one artifact?
----
-
-# Monolithic Artifacts
-
-*How do I build multiple applications into one artifact?*
-
-Create your new Applications in Terraform Enterprise using the application
-compilation feature.
-
-You can either link each Application to the single Build Template you will be
-using to create the monolithic artifact, or run periodic Packer builds.
-
-Each time an Application is pushed, it will store the new application version in
-the artifact registry as a tarball. These will be available for you to download
-at build-time on the machines they belong to.
-
-Here's an example `compile.json` template that you will include with the rest of
-your application files that do the compiling:
-
-
-```json
-{
- "variables": {
- "app_slug": "{{ env `ATLAS_APPLICATION_SLUG` }}"
- },
- "builders": [
- {
- "type": "docker",
- "image": "ubuntu:14.04",
- "commit": true
- }
- ],
- "provisioners": [
- {
- "type": "shell",
- "inline": [
- "apt-get -y update"
- ]
- },
- {
- "type": "file",
- "source": ".",
- "destination": "/tmp/app"
- },
- {
- "type": "shell",
- "inline": [
- "cd /tmp/app",
- "make"
- ]
- },
- {
- "type": "file",
- "source": "/tmp/compiled-app.tar.gz",
- "destination": "compiled-app.tar.gz",
- "direction": "download"
- }
- ],
- "post-processors": [
- [
- {
- "type": "artifice",
- "files": ["compiled-app.tar.gz"]
- },
- {
- "type": "atlas",
- "artifact": "{{user `app_slug` }}",
- "artifact_type": "archive"
- }
- ]
- ]
-}
-```
-
-In your Packer template, you can download each of the latest applications
-artifacts onto the host using the shell provisioner:
-
-
-```text
-$ curl -L -H "X-Atlas-Token: ${ATLAS_TOKEN}" https://atlas.hashicorp.com/api/v1/artifacts/hashicorp/example/archive/latest/file -o example.tar.gz
-```
-
-Here's an example Packer template:
-
-```json
-{
- "variables": {
- "atlas_username": "{{env `ATLAS_USERNAME`}}",
- "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}",
- "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}",
- "aws_region": "{{env `AWS_DEFAULT_REGION`}}",
- "instance_type": "c3.large",
- "source_ami": "ami-9a562df2",
- "name": "example",
- "ssh_username": "ubuntu",
- "app_dir": "/app"
- },
- "push": {
- "name": "{{user `atlas_username`}}/{{user `name`}}",
- "vcs": false
- },
- "builders": [
- {
- "type": "amazon-ebs",
- "access_key": "{{user `aws_access_key`}}",
- "secret_key": "{{user `aws_secret_key`}}",
- "region": "{{user `aws_region`}}",
- "vpc_id": "",
- "subnet_id": "",
- "instance_type": "{{user `instance_type`}}",
- "source_ami": "{{user `source_ami`}}",
- "ami_regions": [],
- "ami_name": "{{user `name`}} {{timestamp}}",
- "ami_description": "{{user `name`}} AMI",
- "run_tags": { "ami-create": "{{user `name`}}" },
- "tags": { "ami": "{{user `name`}}" },
- "ssh_username": "{{user `ssh_username`}}",
- "ssh_timeout": "10m",
- "ssh_private_ip": false,
- "associate_public_ip_address": true
- }
- ],
- "provisioners": [
- {
- "type": "shell",
- "execute_command": "echo {{user `ssh_username`}} | {{ .Vars }} sudo -E -S sh '{{ .Path }}'",
- "inline": [
- "apt-get -y update",
- "apt-get -y upgrade",
- "apt-get -y install curl unzip tar",
- "mkdir -p {{user `app_dir`}}",
- "chmod a+w {{user `app_dir`}}",
- "cd /tmp",
- "curl -L -H 'X-Atlas-Token: ${ATLAS_TOKEN}' https://atlas.hashicorp.com/api/v1/artifacts/{{user `atlas_username`}}/{{user `name`}}/archive/latest/file -o example.tar.gz",
- "tar -xzf example.tar.gz -C {{user `app_dir`}}"
- ]
- }
- ],
- "post-processors": [
- {
- "type": "atlas",
- "artifact": "{{user `atlas_username`}}/{{user `name`}}",
- "artifact_type": "amazon.image",
- "metadata": {
- "created_at": "{{timestamp}}"
- }
- }
- ]
-}
-```
-
-Once downloaded, you can place each application slug where it needs to go to
-produce the monolithic artifact you are accustomed to.
diff --git a/website/source/docs/enterprise/faq/rolling-deployments.html.md b/website/source/docs/enterprise/faq/rolling-deployments.html.md
deleted file mode 100755
index 0753af323..000000000
--- a/website/source/docs/enterprise/faq/rolling-deployments.html.md
+++ /dev/null
@@ -1,94 +0,0 @@
----
-layout: "enterprise"
-page_title: "Rolling Deployments - FAQ - Terraform Enterprise"
-sidebar_current: "docs-enterprise-faq-deployments"
-description: |-
- How do I configure rolling deployments in Terraform Enterprise?
----
-
-# Rolling Deployments
-
-*How do I configure rolling deployments?*
-
-Users are able to quickly change out an Artifact version that is being utilized
-by Terraform, using variables within Terraform Enterprise. This is particularly
-useful when testing specific versions of the given artifact without performing a
-full rollout. This configuration also allows one to deploy any version of an
-artifact with ease, simply by changing a version variable in Terraform and
-re-deploying.
-
-Here is an example:
-
-```hcl
-variable "type" { default = "amazon.image" }
-variable "region" {}
-variable "atlas_username" {}
-variable "pinned_name" {}
-variable "pinned_version" { default = "latest" }
-
-data "atlas_artifact" "pinned" {
- name = "${var.atlas_username}/${var.pinned_name}"
- type = "${var.type}"
- version = "${var.pinned_version}"
-
- lifecycle { create_before_destroy = true }
-
- metadata {
- region = "${var.region}"
- }
-}
-
-output "pinned" { value = "${atlas_artifact.pinned.metadata_full.ami_id}" }
-```
-
-
-In the above example we have an `atlas_artifact` resource where you pass in the
-version number via the variable `pinned_version`. (_note: this variable defaults
-to latest_). If you ever want to deploy any other version, you just update the
-variable `pinned_version` and redeploy.
-
-Below is similar to the first example, but it is in the form of a module that
-handles the creation of artifacts:
-
-```hcl
-variable "type" { default = "amazon.image" }
-variable "region" {}
-variable "atlas_username" {}
-variable "artifact_name" {}
-variable "artifact_version" { default = "latest" }
-
-data "atlas_artifact" "artifact" {
- name = "${var.atlas_username}/${var.artifact_name}"
- type = "${var.type}"
- count = "${length(split(",", var.artifact_version))}"
- version = "${element(split(",", var.artifact_version), count.index)}"
-
- lifecycle { create_before_destroy = true }
- metadata { region = "${var.region}" }
-}
-
-output "amis" { value = "${join(",", atlas_artifact.artifact.*.metadata_full.ami_id)}" }
-```
-
-One can then use the module as follows (_note: the source will likely be
-different depending on the location of the module_):
-
-```hcl
-module "artifact_consul" {
- source = "../../../modules/aws/util/artifact"
-
- type = "${var.artifact_type}"
- region = "${var.region}"
- atlas_username = "${var.atlas_username}"
- artifact_name = "${var.consul_artifact_name}"
- artifact_version = "${var.consul_artifacts}"
-}
-```
-
-
-In the above example, we have created artifacts for Consul. In this example, we
-can create two versions of the artifact, "latest" and "pinned". This is useful
-when rolling a cluster (like Consul) one node at a time, keeping some nodes
-pinned to current version and others deployed with the latest Artifact.
-
-There are additional details for implementing rolling deployments in the [Best-Practices Repo](https://github.com/hashicorp/best-practices/blob/master/terraform/providers/aws/us_east_1_prod/us_east_1_prod.tf#L105-L123), as there are some things uncovered in this FAQ (i.e Using the Terraform Enterprise Artifact in an instance).
diff --git a/website/source/docs/enterprise/faq/vagrant-cloud-migration.html.md b/website/source/docs/enterprise/faq/vagrant-cloud-migration.html.md
deleted file mode 100644
index d279de8b5..000000000
--- a/website/source/docs/enterprise/faq/vagrant-cloud-migration.html.md
+++ /dev/null
@@ -1,23 +0,0 @@
----
-layout: "enterprise"
-page_title: "Vagrant Cloud Migration - FAQ - Terraform Enterprise"
-sidebar_current: "docs-enterprise-faq-vagrant-cloud-migration"
-description: |-
- Vagrant-related functionality will be moved from Terraform Enterprise into its own product, Vagrant Cloud. This migration is currently planned for June 27th, 2017.
----
-
-# Vagrant Cloud Migration
-
-Vagrant-related functionality will be moved from Terraform Enterprise into its own product, Vagrant Cloud. This migration is currently planned for **June 27th, 2017**.
-
-All existing Vagrant boxes will be moved to the new system on that date. All users, organizations, and teams will be copied as well.
-
-## Authentication Tokens
-
-No existing Terraform Enterprise authentication tokens will be transferred. To prevent a disruption of service for Vagrant-related operations, users must create a new authentication token and check "Migrate to Vagrant Cloud" and begin using these tokens for creating and modifying Vagrant boxes. These tokens will be moved on the migration date.
-
-Creating a token via `vagrant login` will also mark a token as "Migrate to Vagrant Cloud".
-
-## More Information
-
-At least 1 month prior to the migration, we will be releasing more information on the specifics and impact of the migration.
\ No newline at end of file
diff --git a/website/source/docs/enterprise/glossary/index.html.md b/website/source/docs/enterprise/glossary/index.html.md
deleted file mode 100755
index a3da0bd95..000000000
--- a/website/source/docs/enterprise/glossary/index.html.md
+++ /dev/null
@@ -1,194 +0,0 @@
----
-layout: "enterprise"
-page_title: "Glossary - Terraform Enterprise"
-sidebar_current: "docs-enterprise-glossary"
-description: |-
- Terminology for Terraform Enterprise.
----
-
-# Glossary
-
-Terraform Enterprise, and this documentation, covers a large set of terminology
-adopted from tools, industry standards and the community. This glossary seeks to
-define as many of those terms as possible to help increase understanding in
-interfacing with the platform and reading documentation.
-
-## Authentication Tokens
-
-Authentication tokens are tokens used to authenticate with Terraform Enterprise
-via APIs or through tools. Authentication tokens can be revoked, expired or
-created under any user.
-
-## ACL
-
-ACL is an acronym for access control list. This defines access to a set of
-resources. Access to an object in Terraform Enterprise limited to "read" for
-certain users is an example of an ACL.
-
-## Alert
-
-An alert represents a health check status change on a Consul node that is sent
-to Terraform Enterprise, and then recorded and distributed to various
-notification methods.
-
-## Application
-
-An application is a set of code that represents an application that should be
-deployed. Applications can be linked to builds to be made available in the
-Packer environment.
-
-## Apply
-
-An apply is the second step of the two steps required for Terraform to make
-changes to infrastructure. The apply is the process of communicating with
-external APIs to make the changes.
-
-## Artifact
-
-An artifact is an abstract representation of something you wish to store and use
-again that has undergone configuration, compilation or some other build process.
-An artifact is typically an image created by Packer that is then deployed by
-Terraform, or used locally with Vagrant.
-
-## Box
-
-Boxes are a Vagrant specific package format. Vagrant can install and use images
-in box format.
-
-## Build
-
-Builds are resources that represent Packer configurations. A build is a generic
-name, sometimes called a "Build Configuration" when defined in the Terraform
-Enterprise UI.
-
-## Build Configuration
-
-A build configuration is a set of settings associated with a resource that creates
-artifacts via builds. A build configuration is the name in `packer push -name
-acemeinc/web`.
-
-## Catalog
-
-The box catalog is a publicly available index of Vagrant Boxes that can be
-downloaded from Terraform Enterprise and used for development.
-
-## Consul
-
-[Consul](https://consul.io) is a HashiCorp tool for service discovery,
-configuration, and orchestration. Consul enables rapid deployment,
-configuration, monitoring and maintenance of service-oriented architectures.
-
-## Datacenter
-
-A datacenter represents a group of nodes in the same network or datacenter
-within Consul.
-
-## Environment
-
-Environments show the real-time status of your infrastructure, any pending
-changes, and its change history. Environments can be configured to use any or
-all of these three components.
-
-Environments are the namespace of your Terraform Enterprise managed
-infrastructure. As an example, if you have a production environment for a
-company named Acme Inc., your environment may be named
-`my-username/production`.
-
-To read more about features provided under environments, read the
-[Terraform](/docs/enterprise) sections.
-
-## Environment Variables
-
-Environment variables injected into the environment of Packer builds or
-Terraform Runs (plans and applies).
-
-## Flapping
-
-Flapping is something entering and leaving a healthy state rapidly. It is
-typically associated with health checks that briefly report unhealthy status
-before recovering.
-
-## Health Check
-
-Health checks trigger alerts by changing status on a Consul node. That status
-change is seen by Terraform Enterprise, when connected, and an associated alert
-is recorded and sent to any configured notification methods, like email.
-
-## Infrastructure
-
-An infrastructure is a stateful representation of a set of Consul datacenters.
-
-## Operator
-
-An operator is a person who is making changes to infrastructure or settings.
-
-## Packer
-
-[Packer](https://packer.io) is a tool for creating images for platforms such as
-Amazon AWS, OpenStack, VMware, VirtualBox, Docker, and more — all from a single
-source configuration.
-
-## Packer Template
-
-A Packer template is a JSON file that configures the various components of Packer
-in order to create one or more machine images.
-
-## Plan
-
-A plan is the first step of the two steps required for Terraform to make
-changes to infrastructure. The plan is the process of determining what changes
-will be made.
-
-## Providers
-
-Providers are often referenced when discussing Packer or Terraform. Terraform
-providers manage resources in Terraform.
-[Read more](https://terraform.io/docs/providers/index.html).
-
-## Post-Processors
-
-The post-processor section within a Packer template configures any
-post-processing that will be done to images built by the builders. Examples of
-post-processing would be compressing files, uploading artifacts, etc..
-
-## Registry
-
-Often referred to as the "Artifact Registry", the registry stores artifacts, be
-it images or IDs for cloud provider images.
-
-## Run
-
-A run represents a two step Terraform plan and a subsequent apply.
-
-## Service
-
-A service in Consul represents an application or service, which could be active
-on any number of nodes.
-
-## Share
-
-Shares let you instantly share public access to your running Vagrant
-environment (virtual machine).
-
-## State
-
-Terraform state is the state of your managed infrastructure from the last time
-Terraform was run. By default this state is stored in a local file named
-`terraform.tfstate`, but it can also be stored in Terraform Enterprise and is
-then called "Remote state".
-
-## Terraform
-
-[Terraform](https://terraform.io) is a tool for safely and efficiently changing
-infrastructure across providers.
-
-## Terraform Configuration
-
-Terraform configuration is the configuration files and any files that may be
-used in provisioners like `remote-exec`.
-
-## Terraform Variables
-
-Variables in Terraform, uploaded with `terraform push` or set in the UI. These
-differ from environment variables as they are a first class Terraform variable
-used in interpolation.
diff --git a/website/source/docs/enterprise/index.html.md b/website/source/docs/enterprise/index.html.md
deleted file mode 100755
index 8ff9ba3ea..000000000
--- a/website/source/docs/enterprise/index.html.md
+++ /dev/null
@@ -1,19 +0,0 @@
----
-layout: "enterprise"
-page_title: "Terraform Enterprise"
-sidebar_current: "docs-enterprise-home"
-description: |-
- Terraform Enterprise is a tool for safely and efficiently changing infrastructure across providers.
----
-
-# Terraform Enterprise Features
-
-[Terraform Enterprise](https://www.hashicorp.com/products/terraform/) is a tool for safely and
-efficiently changing infrastructure across providers.
-
-This is a list of features specific to Terraform Enterprise.
-
-- [Terraform Plans and Applies](/docs/enterprise/runs)
-- [Terraform Artifact Registry](/docs/enterprise/artifacts)
-- [Terraform Remote State Storage](/docs/enterprise/state)
-- [Terraform Run Notifications](/docs/enterprise/runs/notifications.html)
diff --git a/website/source/docs/enterprise/organizations/authentication-policy.html.md b/website/source/docs/enterprise/organizations/authentication-policy.html.md
deleted file mode 100755
index ed91863c6..000000000
--- a/website/source/docs/enterprise/organizations/authentication-policy.html.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "enterprise"
-page_title: "Authentication Policy - Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations-policy"
-description: |-
- Owners can set organization-wide authentication policy in Terraform Enterprise.
----
-
-
-# Set an Organization Authentication Policy
-
-Because organization membership affords members access to potentially sensitive
-resources, owners can set organization-wide authentication policy in Terraform
-Enterprise.
-
-## Requiring Two-Factor Authentication
-
-Organization owners can require that all organization team members use
-[two-factor authentication](/docs/enterprise/user-accounts/authentication.html).
-Those that lack two-factor authentication will be locked out of the web
-interface until they enable it or leave the organization.
-
-Visit your organization's configuration page to enable this feature. All
-organization owners must have two-factor authentication enabled to require the
-practice organization-wide. Note: locked-out users are still able to interact
-with Terraform Enterprise using their `ATLAS_TOKEN`.
-
-## Disabling Two-Factor Authentication Requirement
-
-Organization owners can disable the two-factor authentication requirement from
-their organization's configuration page. Locked-out team members (those who have
-not enabled two-factor authentication) will have their memberships reinstated.
diff --git a/website/source/docs/enterprise/organizations/create.html.md b/website/source/docs/enterprise/organizations/create.html.md
deleted file mode 100755
index 0da8757cc..000000000
--- a/website/source/docs/enterprise/organizations/create.html.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-layout: "enterprise"
-page_title: "Create - Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations-create"
-description: |-
- How to create a Terraform Enterprise account.
----
-
-# Create an Organization Account
-
-To create an organization:
-
-1. Create a personal account. You'll use this to create and administrate the
-organization. You'll be able to add other users as owners of the organization,
-so it won't be tied solely to your account.
-
-1. Visit your new organization page to create the organization.
diff --git a/website/source/docs/enterprise/organizations/credit-card.html.md b/website/source/docs/enterprise/organizations/credit-card.html.md
deleted file mode 100755
index 5827293d5..000000000
--- a/website/source/docs/enterprise/organizations/credit-card.html.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-layout: "enterprise"
-page_title: "Add a Credit Card - Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations-credit"
-description: |-
- You must add a credit card to your organization's account to setup auto billing.
----
-
-# Add credit card details to an organization
-
-To setup automated billing for your Terraform usage, you must add a credit card
-to your organization's account. To do so, go into your account settings, then go
-to the proper organization settings in the left navigation. Select billing in
-the organization settings, and then enter your credit card information.
-
-If you have any questions regarding billing or payment, contact [sales@hashicorp.com](mailto:sales@hashicorp.com).
diff --git a/website/source/docs/enterprise/organizations/index.html.md b/website/source/docs/enterprise/organizations/index.html.md
deleted file mode 100755
index 96676664b..000000000
--- a/website/source/docs/enterprise/organizations/index.html.md
+++ /dev/null
@@ -1,16 +0,0 @@
----
-layout: "enterprise"
-page_title: "Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations"
-description: |-
- Organizations are a group of users in Terraform Enterprise that have access and ownership over shared resources.
----
-
-## Organizations in Terraform Enterprise
-
-Organizations are a group of users in Terraform Enterprise that have access and
-ownership over shared resources. When operating within a team, we recommend
-creating an organization to manage access control, auditing, billing and
-authorization.
-
-Each individual member of your organization should have their own account.
diff --git a/website/source/docs/enterprise/organizations/migrate.html.md b/website/source/docs/enterprise/organizations/migrate.html.md
deleted file mode 100755
index b74578dfb..000000000
--- a/website/source/docs/enterprise/organizations/migrate.html.md
+++ /dev/null
@@ -1,26 +0,0 @@
----
-layout: "enterprise"
-page_title: "Migrate - Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations-migrate"
-description: |-
- How to migrate existing organization.
----
-
-# Migrate Organization
-
-To migrate an existing user account to an organization:
-
-1. Create or retrieve the username of a new personal account. You'll add this
-account as an "owner" for the new organization during the migration process. If
-you already have another account, write down your username.
-
-2. Sign in as the account you wish to migrate and visit the migration page.
-
-3. Put the username of the personal account you wish to make an owner of the
-organization into the username text field and press "Migrate".
-
-4. You should now be logged out and receive a confirmation email with the
-personal account you migrated to.
-
-5. Now, sign in with your personal account. If you visit your settings page, you
-should see your migrated organization available to administrate.
diff --git a/website/source/docs/enterprise/organizations/trials.html.md b/website/source/docs/enterprise/organizations/trials.html.md
deleted file mode 100755
index 73887a907..000000000
--- a/website/source/docs/enterprise/organizations/trials.html.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-layout: "enterprise"
-page_title: "Trial - Organizations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-organizations-trials"
-description: |-
- Terraform Enterprise offers a 30-day trial.
----
-
-# Start a trial
-
-Terraform Enterprise offers organizations 30-day trials for [Terraform Enterprise](https://www.hashicorp.com/products/terraform/), [Consul Enterprise](https://www.hashicorp.com/consul.html), and Vagrant Enterprise. Note that trials are available for organizations, not users.
-
-[Request a trial](https://www.hashicorp.com/products/terraform/) for your organization.
diff --git a/website/source/docs/enterprise/packer/artifacts/creating-amis.html.md b/website/source/docs/enterprise/packer/artifacts/creating-amis.html.md
deleted file mode 100755
index 61257bc00..000000000
--- a/website/source/docs/enterprise/packer/artifacts/creating-amis.html.md
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "enterprise"
-page_title: "Creating AMIs - Packer Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerartifacts-amis"
-description: |-
- Creating AMI artifacts with Terraform Enterprise.
----
-
-# Creating AMI Artifacts with Terraform Enterprise
-
-In an immutable infrastructure workflow, it's important to version and store
-full images (artifacts) to be deployed. This section covers storing [AWS
-AMI](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/AMIs.html) images in
-Terraform Enterprise to be queried and used later.
-
-Note the actual AMI does _not get stored_. Terraform Enterprise simply keeps the
-AMI ID as a reference to the target image. Tools like Terraform can then use
-this in a deploy.
-
-### Steps
-
-If you run Packer in Terraform Enterprise, the following will happen after a [push](/docs/enterprise/packer/builds/starting.html):
-
-1. Terraform Enterprise will run `packer build` against your template in our
-infrastructure. This spins up an AWS instance in your account and provisions it
-with any specified provisioners
-
-2. Packer stops the instance and stores the result as an AMI in AWS under your
-account. This then returns an ID (the artifact) that it passes to the
-post-processor
-
-3. The post-processor creates and uploads the new artifact version with the ID
-in Terraform Enterprise of the type `amazon.image` for use later
-
-### Example
-
-Below is a complete example Packer template that starts an AWS instance.
-
-```json
-{
- "push": {
- "name": "my-username/frontend"
- },
- "provisioners": [],
- "builders": [
- {
- "type": "amazon-ebs",
- "access_key": "",
- "secret_key": "",
- "region": "us-east-1",
- "source_ami": "ami-2ccc7a44",
- "instance_type": "c3.large",
- "ssh_username": "ubuntu",
- "ami_name": "Terraform Enterprise Example {{ timestamp }}"
- }
- ],
- "post-processors": [
- {
- "type": "atlas",
- "artifact": "my-username/web-server",
- "artifact_type": "amazon.image"
- }
- ]
-}
-```
diff --git a/website/source/docs/enterprise/packer/artifacts/creating-vagrant-boxes.html.md b/website/source/docs/enterprise/packer/artifacts/creating-vagrant-boxes.html.md
deleted file mode 100755
index 4e0c80aeb..000000000
--- a/website/source/docs/enterprise/packer/artifacts/creating-vagrant-boxes.html.md
+++ /dev/null
@@ -1,141 +0,0 @@
----
-layout: "enterprise"
-page_title: "Creating Vagrant Boxes - Packer Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerartifacts-vagrant"
-description: |-
- Creating Vagrant artifacts with Terraform Enterprise.
----
-
-# Creating Vagrant Boxes with Packer
-
-We recommend using Packer to create boxes, as it is fully repeatable and
-keeps a strong history of changes within Terraform Enterprise.
-
-## Getting Started
-
-Using Packer requires more up front effort, but the repeatable and automated
-builds will end any manual management of boxes. Additionally, all boxes will be
-stored and served from Terraform Enterprise, keeping a history along the way.
-
-## Post-Processors
-
-Packer uses
-[post-processors](https://packer.io/docs/templates/post-processors.html) to
-define how to process images and artifacts after provisioning. Both the
-`vagrant` and `atlas` post-processors must be used in order to upload Vagrant
-Boxes to Terraform Enterprise via Packer.
-
-It's important that they are [sequenced](https://packer.io/docs/templates/post-processors.html)
-in the Packer template so they run in order. This is done by nesting arrays:
-
-```javascript
-{
- "post-processors": [
- [
- {
- "type": "vagrant"
- // ...
- },
- {
- "type": "atlas"
- // ...
- }
- ]
- ]
-}
-```
-
-Sequencing automatically passes the resulting artifact from one
-post-processor to the next – in this case, the `.box` file.
-
-### Vagrant Post-Processor
-
-The [Vagrant post-processor](https://packer.io/docs/post-processors/vagrant.html) is required to package the image
-from the build (an `.ovf` file, for example) into a `.box` file before
-passing it to the `atlas` post-processor.
-
-```json
-{
- "type": "vagrant",
- "keep_input_artifact": false
-}
-```
-
-The input artifact (i.e. an `.ovf` file) does not need to be kept when building Vagrant Boxes,
-as the resulting `.box` will contain it.
-
-### Post-Processor
-
-The [post-processor](https://packer.io/docs/post-processors/atlas.html) takes the resulting `.box` file and uploads it adding metadata about the box version.
-
-```json
-{
- "type": "atlas",
- "artifact": "my-username/dev-environment",
- "artifact_type": "vagrant.box",
- "metadata": {
- "provider": "vmware_desktop",
- "version": "0.0.1"
- }
-}
-```
-
-#### Attributes Required
-
-These are all of the attributes for that post-processor
-required for uploading Vagrant Boxes. A complete example is shown below.
-
-- `artifact`: The username and box name (`username/name`) you're creating the version
-of the box under. If the box doesn't exist, it will be automatically
-created
-- `artifact_type`: This must be `vagrant.box`. Terraform Enterprise uses this to determine
-how to treat this artifact.
-
-For `vagrant.box` type artifacts, you can specify keys in the metadata block:
-
-- `provider`: The Vagrant provider for the box. Common providers are
-`virtualbox`, `vmware_desktop`, `aws` and so on _(required)_
-- `version`: This is the Vagrant box version and is constrained to the
-same formatting as in the web UI: `*.*.*` _(optional, but required for boxes
-with multiple providers). The version will increment on the minor version if left blank (e.g the initial version will be set to 0.1.0, the subsequent version will be set to 0.2.0)._
-- `description`: This is the description that will be shown with the
-version of the box. You can use Markdown for links and style. _(optional)_
-
-## Example
-
-An example post-processor block for Terraform Enterprise and Vagrant is below. In this example,
-the build runs on both VMware and Virtualbox creating two
-different providers for the same box version (`0.0.1`).
-
-```json
-{
- "post-processors": [
- [
- {
- "type": "vagrant",
- "keep_input_artifact": false
- },
- {
- "type": "atlas",
- "only": ["vmware-iso"],
- "artifact": "my-username/dev-environment",
- "artifact_type": "vagrant.box",
- "metadata": {
- "provider": "vmware_desktop",
- "version": "0.0.1"
- }
- },
- {
- "type": "atlas",
- "only": ["virtualbox-iso"],
- "artifact": "my-username/dev-environment",
- "artifact_type": "vagrant.box",
- "metadata": {
- "provider": "virtualbox",
- "version": "0.0.1"
- }
- }
- ]
- ]
-}
-```
diff --git a/website/source/docs/enterprise/packer/artifacts/index.html.md b/website/source/docs/enterprise/packer/artifacts/index.html.md
deleted file mode 100755
index 1913c9a21..000000000
--- a/website/source/docs/enterprise/packer/artifacts/index.html.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "enterprise"
-page_title: "Packer Artifacts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerartifacts"
-description: |-
- Packer creates and uploads artifacts to Terraform Enterprise.
----
-
-# About Packer and Artifacts
-
-Packer creates and uploads artifacts to Terraform Enterprise. This is done
-with the [post-processor](https://packer.io/docs/post-processors/atlas.html).
-
-Artifacts can then be used to deploy services or access via Vagrant. Artifacts
-are generic, but can be of varying types. These types define different behavior
-within Terraform Enterprise.
-
-For uploading artifacts `artifact_type` can be set to any unique identifier,
-however, the following are recommended for consistency.
-
-- `amazon.image`
-- `azure.image`
-- `digitalocean.image`
-- `docker.image`
-- `google.image`
-- `openstack.image`
-- `parallels.image`
-- `qemu.image`
-- `virtualbox.image`
-- `vmware.image`
-- `custom.image`
-- `application.archive`
-- `vagrant.box`
-
-Packer can create artifacts when running in Terraform Enterprise or locally.
-This is possible due to the post-processors use of the public artifact API to
-store the artifacts.
-
-You can read more about artifacts and their use in the
-[Terraform section](/docs/enterprise/) of the documentation.
diff --git a/website/source/docs/enterprise/packer/builds/build-environment.html.md b/website/source/docs/enterprise/packer/builds/build-environment.html.md
deleted file mode 100755
index 6072463fb..000000000
--- a/website/source/docs/enterprise/packer/builds/build-environment.html.md
+++ /dev/null
@@ -1,174 +0,0 @@
----
-layout: "enterprise"
-page_title: "Build Environment - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-environment"
-description: |-
- This page outlines the environment that Packer runs in within Terraform Enterprise.
----
-
-# Packer Build Environment
-
-This page outlines the environment that Packer runs in within Terraform
-Enterprise.
-
-### Supported Builders
-
-Terraform Enterprise currently supports running the following Packer builders:
-
-- amazon-chroot
-- amazon-ebs
-- amazon-instance
-- digitalocean
-- docker
-- googlecompute
-- null
-- openstack
-- qemu
-- virtualbox-iso
-- vmware-iso
-
-### Files
-
-All files in the uploading package (via [Packer push or GitHub](/docs/enterprise/packer/builds/starting.html)),
-and the application from the build pipeline are available on the filesystem
-of the build environment.
-
-You can use the file icon on the running build to show a list of
-available files.
-
-Files can be copied to the destination image Packer is provisioning
-with [Packer Provisioners](https://packer.io/docs/templates/provisioners.html).
-
-An example of this with the Shell provisioner is below.
-
-```json
-{
- "provisioners": [
- {
- "type": "shell",
- "scripts": [
- "scripts/vagrant.sh",
- "scripts/dependencies.sh",
- "scripts/cleanup.sh"
- ]
- }
- ]
-}
-```
-
-We encourage use of relative paths over absolute paths to maintain portability
-between Terraform Enterprise and local builds.
-
-The total size of all files in the package being uploaded via
-[Packer push or GitHub](/docs/enterprise/packer/builds/starting.html) must be 5 GB or less.
-
-If you need to upload objects that are larger, such as dmgs, see the
-[`packer push` "Limits" documentation](https://packer.io/docs/command-line/push.html)
-for ways around this limitation.
-
-### Hardware Limitations
-
-Currently, each builder defined in the Packer template receives
-the following hardware resources. This is subject to change.
-
-- 1 CPU core
-- 2 GB of memory
-- 20 GBs of disk space
-
-### Environment Variables
-
-You can set any number of environment variables that will be injected
-into your build environment at runtime. These variables can be
-used to configure your build with secrets or other key value configuration.
-
-Variables are encrypted and stored securely.
-
-Additionally, the following environment variables are automatically injected. All injected environment variables will be prefixed with `ATLAS_`
-
-- `ATLAS_TOKEN` - This is a unique, per-build token that expires at the end of
- build execution (e.g. `"abcd.atlasv1.ghjkl..."`)
-- `ATLAS_BUILD_ID` - This is a unique identifier for this build (e.g. `"33"`)
-- `ATLAS_BUILD_NUMBER` - This is a unique identifier for all builds in the same
- scope (e.g. `"12"`)
-- `ATLAS_BUILD_NAME` - This is the name of the build (e.g. `"mybuild"`).
-- `ATLAS_BUILD_SLUG` - This is the full name of the build
- (e.g. `"company/mybuild"`).
-- `ATLAS_BUILD_USERNAME` - This is the username associated with the build
- (e.g. `"sammy"`)
-- `ATLAS_BUILD_CONFIGURATION_VERSION` - This is the unique, auto-incrementing
- version for the [Packer build configuration](/docs/enterprise/glossary/index.html) (e.g. `"34"`).
-- `ATLAS_BUILD_GITHUB_BRANCH` - This is the name of the branch
- that the associated Packer build configuration version was ingressed from
- (e.g. `master`).
-- `ATLAS_BUILD_GITHUB_COMMIT_SHA` - This is the full commit hash
- of the commit that the associated Packer build configuration version was
- ingressed from (e.g. `"abcd1234..."`).
-- `ATLAS_BUILD_GITHUB_TAG` - This is the name of the tag
- that the associated Packer build configuration version was ingressed from
- (e.g. `"v0.1.0"`).
-
-If the build was triggered by a new application version, the following
-environment variables are also available:
-
-- `ATLAS_APPLICATION_NAME` - This is the name of the application connected to
- the Packer build (e.g. `"myapp"`).
-- `ATLAS_APPLICATION_SLUG` - This is the full name of the application connected
- to the Packer build (e.g. `"company/myapp"`).
-- `ATLAS_APPLICATION_USERNAME` - This is the username associated with the
- application connected to the Packer build (e.g. `"sammy"`)
-- `ATLAS_APPLICATION_VERSION` - This is the version of the application connected
- to the Packer build (e.g. `"2"`).
-- `ATLAS_APPLICATION_GITHUB_BRANCH` - This is the name of the branch that the
- associated application version was ingressed from (e.g. `master`).
-- `ATLAS_APPLICATION_GITHUB_COMMIT_SHA` - This is the full commit hash
- of the commit that the associated application version was ingressed from
- (e.g. `"abcd1234..."`).
-- `ATLAS_APPLICATION_GITHUB_TAG` - This is the name of the tag that the
- associated application version was ingressed from (e.g. `"v0.1.0"`).
-
-For any of the `GITHUB_` attributes, the value of the environment variable will
-be the empty string (`""`) if the resource is not connected to GitHub or if the
-resource was created outside of GitHub (like using `packer push` or
-`vagrant push`).
-
-
-### Base Artifact Variable Injection
-
-A base artifact can be selected on the "Settings" page for a build
-configuration. During each build, the latest artifact version will have its
-external ID (such as an AMI for AWS) injected as an environment variable for the
-environment.
-
-The keys for the following artifact types will be injected:
-
-- `aws.ami`: `ATLAS_BASE_ARTIFACT_AWS_AMI_ID`
-- `amazon.ami`: `ATLAS_BASE_ARTIFACT_AMAZON_AMI_ID`
-- `amazon.image`: `ATLAS_BASE_ARTIFACT_AMAZON_IMAGE_ID`
-- `google.image`: `ATLAS_BASE_ARTIFACT_GOOGLE_IMAGE_ID`
-
-You can then reference this artifact in your Packer template, like this
-AWS example:
-
-```json
-{
- "variables": {
- "base_ami": "{{env `ATLAS_BASE_ARTIFACT_AWS_AMI_ID`}}"
- },
- "builders": [
- {
- "type": "amazon-ebs",
- "access_key": "",
- "secret_key": "",
- "region": "us-east-1",
- "source_ami": "{{user `base_ami`}}"
- }
- ]
-}
-```
-
-## Notes on Security
-
-Packer environment variables in Terraform Enterprise are encrypted using [Vault](https://vaultproject.io)
-and closely guarded and audited. If you have questions or concerns
-about the safety of your configuration, please contact our security team
-at [security@hashicorp.com](mailto:security@hashicorp.com).
diff --git a/website/source/docs/enterprise/packer/builds/how-builds-run.html.md b/website/source/docs/enterprise/packer/builds/how-builds-run.html.md
deleted file mode 100755
index 4d8ae817a..000000000
--- a/website/source/docs/enterprise/packer/builds/how-builds-run.html.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "enterprise"
-page_title: "Running - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-runbuilds"
-description: |-
- This briefly covers the internal process of running builds in Terraform Enterprise.
----
-
-# How Packer Builds Run in Terraform Enterprise
-
-This briefly covers the internal process of running builds in Terraform
-Enterprise. It's not necessary to know this information, but may be valuable to
-help understand implications of running or debugging failing builds.
-
-### Steps of Execution
-
-1. A Packer template and directory of files is uploaded via Packer Push or
-GitHub
-
-2. Terraform Enterprise creates a version of the build configuration and waits
-for the upload to complete. At this point, the version will be visible in the UI
-even if the upload has not completed
-
-3. Once the upload finishes, the build is queued. This is potentially split
-across multiple machines for faster processing
-
-4. In the build environment, the package including the files and Packer template
-are downloaded
-
-5. `packer build` is run against the template in the build environment
-
-6. Logs are streamed into the UI and stored
-
-7. Any artifacts as part of the build are then uploaded via the public artifact
-API, as they would be if Packer was executed locally
-
-8. The build completes, the environment is torn down and status updated
diff --git a/website/source/docs/enterprise/packer/builds/index.html.md b/website/source/docs/enterprise/packer/builds/index.html.md
deleted file mode 100755
index d060d9a13..000000000
--- a/website/source/docs/enterprise/packer/builds/index.html.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "enterprise"
-page_title: "Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds"
-description: |-
- Builds are instances of `packer build` being run within Terraform Enterprise.
----
-
-# About Builds
-
-Builds are instances of `packer build` being run within Terraform Enterprise.
-Every build belongs to a build configuration.
-
-__Build configurations__ represent a set of Packer configuration versions and
-builds run. It is used as a namespace within Terraform Enterprise, Packer
-commands and URLs. Packer configuration sent to Terraform Enterprise are stored
-and versioned under these build configurations.
-
-These __versions__ of Packer configuration can contain:
-
-- The Packer template, a JSON file which defines one or more builds by
- configuring the various components of Packer
-
-- Any provisioning scripts or packages used by the template
-
-- Applications that use the build as part of the pipeline and merged into the
- version prior to running Packer on it
-
-When a new version of Packer configuration and associated scripts from GitHub or
-`packer push` is received, it automatically starts a new Packer build. That
-Packer build runs in an isolated machine environment with the contents of that
-version available to it.
-
-You can be alerted of build events with
-[Build Notifications](/docs/enterprise/packer/builds/notifications.html).
diff --git a/website/source/docs/enterprise/packer/builds/installing-software.html.md b/website/source/docs/enterprise/packer/builds/installing-software.html.md
deleted file mode 100755
index dfa91945f..000000000
--- a/website/source/docs/enterprise/packer/builds/installing-software.html.md
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "enterprise"
-page_title: "Installing Software - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-installing"
-description: |-
- Installing software with Packer.
----
-
-# Installing Software
-
-Please review the [Packer Build Environment](/docs/enterprise/packer/builds/build-environment.html)
-specification for important information on isolation, security, and hardware
-limitations before continuing.
-
-In some cases, it may be necessary to install custom software to build your
-artifact using Packer. The easiest way to install software on the Packer builder
-is via the `shell-local` provisioner. This will execute commands on the host
-machine running Packer.
-
- {
- "provisioners": [
- {
- "type": "shell-local",
- "command": "sudo apt-get install -y customsoftware"
- }
- ]
- }
-
-Please note that nothing is persisted between Packer builds, so you will need
-to install custom software on each run.
-
-The Packer builders run the latest version of Ubuntu LTS.
diff --git a/website/source/docs/enterprise/packer/builds/managing-packer-versions.html.md b/website/source/docs/enterprise/packer/builds/managing-packer-versions.html.md
deleted file mode 100755
index b33914df5..000000000
--- a/website/source/docs/enterprise/packer/builds/managing-packer-versions.html.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: "enterprise"
-page_title: "Managing Packer Versions - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-versions"
-description: |-
- Terraform Enterprise does not automatically upgrade the version of Packer used to run builds or compiles.
----
-
-# Managing Packer Versions
-
-Terraform Enterprise does not automatically upgrade the version of Packer used
-to run builds or compiles. This is intentional, as occasionally there can be
-backwards incompatible changes made to Packer that cause templates to stop
-building properly, or new versions that produce some other unexpected behavior.
-
-All upgrades must be performed by a user, but Terraform Enterprise will display
-a notice above any builds run with out of date versions. We encourage the use of
-the latest version when possible.
-
-### Upgrading Packer
-
-1. Go to the Settings tab of a build configuration or application
-
-2. Go to the "Packer Version" section and select the version you wish to use
-
-3. Review the changelog for that version and previous versions
-
-4. Click the save button. At this point, future builds will use that version
diff --git a/website/source/docs/enterprise/packer/builds/notifications.html.md b/website/source/docs/enterprise/packer/builds/notifications.html.md
deleted file mode 100755
index ec7b607f3..000000000
--- a/website/source/docs/enterprise/packer/builds/notifications.html.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: "enterprise"
-page_title: "Build Notifications - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-notifications"
-description: |-
- Terraform Enterprise can send build notifications to your organization.
----
-
-# About Packer Build Notifications
-
-Terraform Enterprise can send build notifications to your organization for the
-following events:
-
-- **Starting** - The build has begun.
-- **Finished** - All build jobs have finished successfully.
-- **Errored** - An error has occurred during one of the build jobs.
-- **Canceled** - A user has canceled the build.
-
-> Emails will include logs for the **Finished** and **Errored** events.
-
-You can toggle notifications for each of these events on the "Integrations" tab
-of a build configuration.
diff --git a/website/source/docs/enterprise/packer/builds/rebuilding.html.md b/website/source/docs/enterprise/packer/builds/rebuilding.html.md
deleted file mode 100755
index 31aa13ebd..000000000
--- a/website/source/docs/enterprise/packer/builds/rebuilding.html.md
+++ /dev/null
@@ -1,20 +0,0 @@
----
-layout: "enterprise"
-page_title: "Rebuilding - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-rebuilding"
-description: |-
- Sometimes builds fail due to temporary or remotely controlled conditions.
----
-
-# Rebuilding Builds
-
-Sometimes builds fail due to temporary or remotely controlled conditions.
-
-In this case, it may make sense to "rebuild" a Packer build. To do so, visit the
-build you wish to run again and click the Rebuild button. This will take that
-exact version of configuration and run it again.
-
-You can rebuild at any point in history, but this may cause side effects that
-are not wanted. For example, if you were to rebuild an old version of a build,
-it may create the next version of an artifact that is then released, causing a
-rollback of your configuration to occur.
diff --git a/website/source/docs/enterprise/packer/builds/scheduling-builds.html.md b/website/source/docs/enterprise/packer/builds/scheduling-builds.html.md
deleted file mode 100755
index d610312ae..000000000
--- a/website/source/docs/enterprise/packer/builds/scheduling-builds.html.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "enterprise"
-page_title: "Schedule Periodic Builds - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-scheduling"
-description: |-
- Terraform Enterprise can automatically run a Packer build and create artifacts on a specified schedule.
----
-
-# Schedule Periodic Builds in Terraform Enterprise
-
-Terraform Enterprise can automatically run a Packer build and
-create artifacts on a specified schedule. This option is disabled by default and can be enabled by an
-organization owner on a per-[environment](/docs/enterprise/glossary#environment) basis.
-
-On the specified interval, builds will be automatically queued that run Packer
-for you, creating any artifacts and sending the appropriate notifications.
-
-If your artifacts are used in any other environments and you have activated the
-plan on artifact upload feature, this may also queue Terraform plans.
-
-This feature is useful for maintenance of images and automatic updates, or to
-build nightly style images for staging or development environments.
-
-## Enabling Periodic Builds
-
-To enable periodic builds for a build, visit the build settings page and select
-the desired interval and click the save button to persist the changes. An
-initial build may immediately run, depending on the history, and then will
-automatically build at the specified interval.
-
-If you have run a build separately, either manually or triggered from GitHub or
-Packer configuration version uploads, Terraform Enterprise will not queue a new
-build until the allowed time after the manual build ran. This ensures that a
-build has been executed at the specified schedule.
diff --git a/website/source/docs/enterprise/packer/builds/starting.html.md b/website/source/docs/enterprise/packer/builds/starting.html.md
deleted file mode 100755
index 1e31c881d..000000000
--- a/website/source/docs/enterprise/packer/builds/starting.html.md
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "enterprise"
-page_title: "Starting - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-starting"
-description: |-
- Packer builds can be started in Terraform Enterprise in two ways. This post is about how.
----
-
-# Starting Packer Builds in Terraform Enterprise
-
-Packer builds can be started in two ways: `packer push` to upload the
-template and directory or via a GitHub connection that retrieves the contents of
-a repository after changes to the default branch (usually master).
-
-### Packer Push
-
-Packer `push` is a
-[Packer command](https://packer.io/docs/command-line/push.html) that packages
-and uploads a Packer template and directory. This then creates a build which
-performs `packer build` against the uploaded template and packaged directory.
-
-The directory is included in order to run any associated provisioners, builds or
-post-processors that all might use local files. For example, a shell script or
-set of Puppet modules used in a Packer build needs to be part of the upload for
-Packer to be run remotely.
-
-By default, everything in your directory is uploaded as part of the push.
-
-However, it's not always the case that the entire directory should be uploaded.
-Often, temporary or cache directories and files like `.git`, `.tmp` will be
-included by default. This can cause builds to fail at certain sizes and should
-be avoided. You can specify
-[exclusions](https://packer.io/docs/templates/push.html#exclude) to avoid this
-situation.
-
-Packer also allows for a
-[VCS option](https://packer.io/docs/templates/push.html#vcs) that will detect
-your VCS (if there is one) and only upload the files that are tracked by the
-VCS. This is useful for automatically excluding ignored files. In a VCS like
-git, this basically does a `git ls-files`.
-
-
-### GitHub Webhooks
-
-Optionally, GitHub can be used to import Packer templates and configurations.
-When used within an organization, this can be extremely valuable for keeping
-differences in environments and last mile changes from occurring before an
-upload.
-
-After you have [connected your GitHub account](/docs/enterprise/vcs/github.html) to Terraform Enterprise,
-you can connect your [Build Configuration](/docs/enterprise/glossary#build-configuration)
-to the target GitHub repository. The GitHub repository will be linked to the
-Packer configuration, and GitHub will start sending webhooks.
-Certain GitHub webhook events, detailed below, will cause the repository to be
-automatically ingressed into Terraform Enterprise and stored, along with references to the
-GitHub commits and authorship information.
-
-After each ingress the configuration will automatically build.
-
-You can disable an ingress by adding the text `[atlas skip]` or `[ci skip]` to
-your commit message.
-
-Supported GitHub webhook events:
-
-- push (on by default)
- - ingress when a tag is created
- - ingress when the default branch is updated
- - note: the default branch is either configured on your configuration's
- integrations tab in Terraform Enterprise, or if that is blank it is the GitHub
- repository's default branch
-- create (off by default)
- - ingress when a tag is created
- - note: if you want to only run on tag creation, turn on create events and
- turn off push events
diff --git a/website/source/docs/enterprise/packer/builds/troubleshooting.html.md b/website/source/docs/enterprise/packer/builds/troubleshooting.html.md
deleted file mode 100755
index 886c79cb9..000000000
--- a/website/source/docs/enterprise/packer/builds/troubleshooting.html.md
+++ /dev/null
@@ -1,120 +0,0 @@
----
-layout: "enterprise"
-page_title: "Troubleshooting - Packer Builds - Terraform Enterprise"
-sidebar_current: "docs-enterprise-packerbuilds-troubleshooting"
-description: |-
- Packer builds can fail in Terraform Enterprise for a number of reasons – improper configuration, transient networking errors, and hardware constraints are all possible.
----
-
-# Troubleshooting Failing Builds
-
-Packer builds can fail in Terraform Enterprise for a number of reasons –
-improper configuration, transient networking errors, and hardware constraints
-are all possible. Below is a list of debugging options you can use.
-
-### Verbose Packer Logging
-
-You can [set a variable](/docs/enterprise/packer/builds/build-environment.html#environment-variables) in the UI that increases the logging verbosity
-in Packer. Set the `PACKER_LOG` key to a value of `1` to accomplish this.
-
-After setting the variable, you'll need to [rebuild](/docs/enterprise/packer/builds/rebuilding.html).
-
-Verbose logging will be much louder than normal Packer logs and isn't
-recommended for day-to-day operations. Once enabled, you'll be able to see in
-further detail why things failed or what operations Packer was performing.
-
-This can also be used locally:
-
-```text
-$ PACKER_LOG=1 packer build ...
-```
-
-### Hanging Builds
-
-Some VM builds, such as VMware or VirtualBox, may hang at various stages,
-most notably `Waiting for SSH...`.
-
-Things to pay attention to when this happens:
-
-- SSH credentials must be properly configured. AWS keypairs should match, SSH
- usernames should be correct, passwords should match, etc.
-
-- Any VM pre-seed configuration should have the same SSH configuration as your
- template defines
-
-A good way to debug this is to manually attempt to use the same SSH
-configuration locally, running with `packer build -debug`. See
-more about [debugging Packer builds](https://packer.io/docs/other/debugging.html).
-
-### Hardware Limitations
-
-Your build may be failing by requesting larger memory or
-disk usage than is available. Read more about the [build environment](/docs/enterprise/packer/builds/build-environment.html#hardware-limitations).
-
-_Typically_ Packer builds that fail due to requesting hardware limits
-that exceed Terraform Enterprise's [hardware limitations](/docs/enterprise/packer/builds/build-environment.html#hardware-limitations)
-will fail with a _The operation was canceled_ error message as shown below:
-
-```text
-# ...
-==> vmware-iso: Starting virtual machine...
- vmware-iso: The VM will be run headless, without a GUI. If you want to
- vmware-iso: view the screen of the VM, connect via VNC without a password to
- vmware-iso: 127.0.0.1:5918
-==> vmware-iso: Error starting VM: VMware error: Error: The operation was canceled
-==> vmware-iso: Waiting 4.604392397s to give VMware time to clean up...
-==> vmware-iso: Deleting output directory...
-Build 'vmware-iso' errored: Error starting VM: VMware error: Error: The operation was canceled
-
-==> Some builds didn't complete successfully and had errors:
---> vmware-iso: Error starting VM: VMware error: Error: The operation was canceled
-```
-
-### Local Debugging
-
-Sometimes it's faster to debug failing builds locally. In this case,
-you'll want to [install Packer](https://www.packer.io/intro/getting-started/setup.html) and any providers (like Virtualbox) necessary.
-
-Because Terraform Enterprise runs the open source version of Packer, there
-should be no difference in execution between the two, other than the environment
-that Packer is running in. For more on hardware constraints in the Terraform
-Enterprise environment read below.
-
-Once your builds are running smoothly locally you can push it up to Terraform
-Enterprise for versioning and automated builds.
-
-### Internal Errors
-
-This is a short list of internal errors and what they mean.
-
-- SIC-001: Your data was being ingressed from GitHub but failed
-to properly unpack. This can be caused by bad permissions, using
-symlinks or very large repository sizes. Using symlinks inside of the
-packer directory, or the root of the repository, if the packer directory
-is unspecified, will result in this internal error.
-
- _**Note:** Most often this error occurs when applications or builds are
- linked to a GitHub repository and the directory and/or template paths are
- incorrect. Double check that the paths specified when you linked the GitHub
- repository match the actual paths to your template file._
-
-- SEC-001: Your data was being unpacked from a tarball uploaded
-and encountered an error. This can be caused by bad permissions, using
-symlinks or very large tarball sizes.
-
-### Community Resources
-
-Packer is an open source project with an active community. If you're
-having an issue specific to Packer, the best avenue for support is
-the mailing list or IRC. All bug reports should go to GitHub.
-
-- Website: [packer.io](https://packer.io)
-- GitHub: [github.com/mitchellh/packer](https://github.com/mitchellh/packer)
-- IRC: `#packer-tool` on Freenode
-- Mailing list: [Google Groups](http://groups.google.com/group/packer-tool)
-
-### Getting Support
-
-If you believe your build is failing as a result of a bug in Terraform
-Enterprise, or would like other support, please
-[email us](mailto:support@hashicorp.com).
diff --git a/website/source/docs/enterprise/runs/automatic-applies.html.md b/website/source/docs/enterprise/runs/automatic-applies.html.md
deleted file mode 100755
index 93f68cde4..000000000
--- a/website/source/docs/enterprise/runs/automatic-applies.html.md
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: "enterprise"
-page_title: "Automatic Applies - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-applies"
-description: |-
- How to automatically apply plans.
----
-
-# Automatic Terraform Applies
-
--> This is an unreleased beta feature. Please
-contact support if you are interested
-in helping us test this feature.
-
-You can automatically apply successful Terraform plans to your
-infrastructure. This option is disabled by default and can be enabled by an
-organization owner on a per-environment basis.
-
--> This is an advanced feature that enables changes to active infrastructure
-without user confirmation. Please understand the implications to your
-infrastructure before enabling.
-
-## Enabling Auto-Apply
-
-To enable auto-apply for an environment, visit the environment settings page
-check the box labeled "auto apply" and click the save button to persist the
-changes. The next successful Terraform plan for the environment will
-automatically apply without user confirmation.
diff --git a/website/source/docs/enterprise/runs/how-runs-execute.html.md b/website/source/docs/enterprise/runs/how-runs-execute.html.md
deleted file mode 100755
index aef54defb..000000000
--- a/website/source/docs/enterprise/runs/how-runs-execute.html.md
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "enterprise"
-page_title: "Execution - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-execute"
-description: |-
- How runs execute in Terraform Enterprise.
----
-
-# How Terraform Runs Execute
-
-This briefly covers the internal process of running Terraform plan and applies.
-It is not necessary to know this information, but may be valuable to help
-understand implications of running or debugging failed runs.
-
-## Steps of Execution
-
-1. A set of Terraform configuration and directory of files is uploaded via Terraform Push or GitHub
-2. Terraform Enterprise creates a version of the Terraform configuration and waits for the upload
-to complete. At this point, the version will be visible in the UI even if the upload has
-not completed
-3. Once the upload finishes, Terraform Enterprise creates a run and queues a `terraform plan`
-4. In the run environment, the package including the files and Terraform
-configuration are downloaded
-5. `terraform plan` is run against the configuration in the run environment
-6. Logs are streamed into the UI and stored
-7. The `.tfplan` file created in the plan is uploaded and stored
-8. Once the plan completes, the environment is torn down and status is
-updated in the UI
-9. The plan then requires confirmation by an operator. It can optionally
-be discarded and ignored at this stage
-10. Once confirmed, the run then executes a `terraform apply` in a new
-environment against the saved `.tfplan` file
-11. The logs are streamed into the UI and stored
-12. Once the apply completes, the environment is torn down, status is
-updated in the UI and changed state is saved back
-
-Note: In the case of a failed apply, it's safe to re-run. This is possible
-because Terraform saves partial state and can "pick up where it left off".
-
-### Customizing Terraform Execution
-
-As described in the steps above, Terraform will be run against your configuration
-when changes are pushed via GitHub, `terraform push`, or manually queued in the
-UI. There are a few options available to customize the execution of Terraform.
-These are:
-
-- The directory that contains your environment's Terraform configuration can be customized
-to support directory structures with more than one set of Terraform configuration files.
-To customize the directory for your Environment, set the _Terraform Directory_
-property in the [_GitHub Integration_](/docs/enterprise/vcs/github.html) settings for your environment. This is equivalent to
-passing the `[dir]` argument when running Terraform in your local shell.
-- The directory in which Terraform is executed from can be customized to support directory
-structures with nested sub-directories or configurations that use Terraform modules with
-relative paths. To customize the directory used for Terraform execution in your Environment, set the `TF_ATLAS_DIR`
-[environment variable](/docs/enterprise/runs/variables-and-configuration.html#environment-variables)
-to the relative path of the directory - ie. `terraform/production`. This is equivalent to
-changing directories to the appropriate path in your local shell and then executing Terraform.
diff --git a/website/source/docs/enterprise/runs/index.html.md b/website/source/docs/enterprise/runs/index.html.md
deleted file mode 100755
index 92062b98c..000000000
--- a/website/source/docs/enterprise/runs/index.html.md
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "enterprise"
-page_title: "Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs"
-description: |-
- A "run" in Atlas represents the logical grouping of two Terraform steps - a "plan" and an "apply".
----
-
-# About Terraform Enterprise Runs
-
-A "run" represents the logical grouping of two Terraform steps - a "plan" and an
-"apply". The distinction between these two phases of a Terraform run are
-documented below.
-
-When a [new run is created](/docs/enterprise/runs/starting.html), Terraform
-Enterprise automatically queues a Terraform plan. Because a plan does not change
-the state of infrastructure, it is safe to execute a plan multiple times without
-consequence. An apply executes the output of a plan and actively changes
-infrastructure. To prevent race conditions, the platform will only execute one
-plan/apply at a time (plans for validating GitHub Pull Requests are allowed to
-happen concurrently, as they do not modify state). You can read more about
-Terraform plans and applies below.
-
-## Plan
-
-During the plan phase of a run, the command `terraform plan` is executed.
-Terraform performs a refresh and then determines what actions are necessary to
-reach the desired state specified in the Terraform configuration files. A
-successful plan outputs an executable file that is securely stored in Terraform
-Enterprise and may be used in the subsequent apply.
-
-Terraform plans do not change the state of infrastructure, so it is
-safe to execute a plan multiple times. In fact, there are a number of components
-that can trigger a Terraform plan. You can read more about this in the
-[starting runs](/docs/enterprise/runs/starting.html) section.
-
-## Apply
-
-During the apply phase of a run, the command `terraform apply` is executed
-with the executable result of the prior Terraform plan. This phase **can change
-infrastructure** by applying the changes required to reach the desired state
-specified in the Terraform configuration file.
-
-While Terraform plans are safe to run multiple times, Terraform applies often
-change active infrastructure. Because of this, the default behavior
-is to require user confirmation as part of the
-[Terraform run execution](/docs/enterprise/runs/how-runs-execute.html). Upon
-user confirmation, the Terraform apply will be queued and executed. It is also
-possible to configure
-[automatic applies](/docs/enterprise/runs/automatic-applies.html), but this option is
-disabled by default.
-
-## Environment Locking
-
-During run execution, the environment will lock to prevent other plans
-and applies from executing simultaneously. When the run completes, the next
-pending run, if any, will be started.
-
-An administrator of the environment can also manually lock the environment, for
-example during a maintenance period.
-
-You can see the lock status of an environment, and lock/unlock the environment
-by visiting that environment's settings page.
-
-## Notifications
-
-To receive alerts when user confirmation is needed or for any other phase of the
-run process, you can
-[enable run notifications](/docs/enterprise/runs/notifications.html) for your
-organization or environment.
diff --git a/website/source/docs/enterprise/runs/installing-software.html.md b/website/source/docs/enterprise/runs/installing-software.html.md
deleted file mode 100755
index 19ed6d48a..000000000
--- a/website/source/docs/enterprise/runs/installing-software.html.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "enterprise"
-page_title: "Installing Software - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-installing"
-description: |-
- Installing custom software on the Terraform Runners.
----
-
-# Installing Custom Software
-
-The machines that run Terraform exist in an isolated environment and are
-destroyed on each use. In some cases, it may be necessary to install certain
-software on the Terraform runner, such as a configuration management tool like
-Chef, Puppet, Ansible, or Salt.
-
-The easiest way to install software on the Packer builder is via the
-`local-exec` provisioner. This will execute commands on the host machine running
-Terraform.
-
-```hcl
-resource "null_resource" "local-software" {
- provisioner "local-exec" {
- command = < This is an advanced feature that enables changes to active infrastructure
-without user confirmation. Please understand the implications to your
-infrastructure before enabling.
-
-## Setting Up AWS Multi-Factor Authentication
-
-Before you are able to set up multi-factor authentication in Terraform
-Enterprise, you must set up an IAM user in AWS. More details about creating an
-IAM user can be found
-[here](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable.html).
-Setting up an AWS IAM user will provide you with the serial number and access
-keys that you will need in order to connect to AWS Secure Token Service.
-
-In order to set up multi-factor authentication for your organization, you must
-have the following environment variables in your configuration:
-`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`, and `AWS_MFA_SERIAL_NUMBER`. You can
-set these variables at `/settings/organization_variables.`
-
-
-## Enabling AWS Multi-Factor Authentication
-
-To enable multi-factor authentication, visit the environment settings page:
-
-```text
-/terraform/:organization/environments/:environment/settings
-```
-
-Use the drop down labeled "AWS Multi-Factor Authentication". There are
-currently three levels available: "never", "applies only", and "plans and
-applies". Once you have selected your desired level, save your settings. All
-subsequent runs on the environment will now require the selected level of
-authentication.
-
-## Using AWS Multi-Factor Authentication
-
-Once you have elected to use AWS MFA for your Terraform Runs, you will then be
-prompted to enter a token code each time you plan or apply the run depending on
-your settings. Your one time use token code will be sent to you via the method
-you selected when setting up your
-[IAM account](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable.html).
-
-If you have selected "applies only", you will be able to queue and run a plan
-without entering your token code. Once the run finishes, you will need to enter
-your token code and click "Authenticate" before applying the plan. Once you
-submit your token code, the apply will start, and you will see "Authenticated
-with MFA by `user`" in the UI. If for any case there is an error when submitting
-your token code, the lock icon in the UI will turn red, and an error will appear
-alerting you to the failure.
-
-If you have selected "plans and applies", you will be prompted to enter your
-token before queueing your plan. Once you enter the token and click
-"Authenticate", you will see "Authenticated with MFA by `user`" appear in the UI
-logs. The plan will queue and you may run the plan once it is queued. Then,
-before applying, you will be asked to authenticate with MFA again. Enter your
-token, click Authenticate, and note that "Authenticated with MFA by `user`"
-appears in the UI log after the apply begins. If for any case there is an error
-authenticating, the lock icon in the UI will turn red, and an error will appear
-alerting you to the failure.
-
-## Using AWS Multi-Factor Authentication with AWS STS AssumeRole
-
-The AWS Secure Token Service can be used to return a set of temporary security
-credentials that a user can use to access resources that they might not normally
-have access to (known as AssumeRole). The AssumeRole workflow is compatible with
-AWS multi-factor authentication in Terraform Enterprise.
-
-To use AssumeRole, you first need to create an IAM role and edit the trust
-relationship policy document to contain the following:
-
-```json
- {
- "Version": "2012-10-17",
- "Statement": [
- {
- "Effect": "Allow",
- "Principal": {
- "AWS": "arn:aws:iam::[INT]:user/[USER]"
- },
- "Action": "sts:AssumeRole",
- "Condition": {
- "Bool": {
- "aws:MultiFactorAuthPresent": "true"
- }
- }
- }
- ]
-}
-```
-
-You can then configure the Terraform AWS provider to assume a given role by specifying the role ARN within the nested assume_role block:
-
-```hcl
-provider "aws" {
- # ...
-
- assume_role {
- role_arn = "arn:aws:iam::[INT]:role/[ROLE]"
- }
-}
-```
diff --git a/website/source/docs/enterprise/runs/notifications.html.md b/website/source/docs/enterprise/runs/notifications.html.md
deleted file mode 100755
index 6a5e094bf..000000000
--- a/website/source/docs/enterprise/runs/notifications.html.md
+++ /dev/null
@@ -1,33 +0,0 @@
----
-layout: "enterprise"
-page_title: "Notifications - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-notifications"
-description: |-
- Terraform Enterprise can send notifications to your organization. This post is on how.
----
-
-
-# Terraform Run Notifications
-
-Terraform Enterprise can send run notifications, the following events are
-configurable:
-
-- **Needs Confirmation** - The plan phase has succeeded, and there are changes
- that need to be confirmed before applying.
-
-- **Confirmed** - A plan has been confirmed, and it will begin applying shortly.
-
-- **Discarded** - A user has discarded the plan.
-
-- **Applying** - The plan has begun to apply and make changes to your
- infrastructure.
-
-- **Applied** - The plan was applied successfully.
-
-- **Errored** - An error has occurred during the plan or apply phase.
-
-> Emails will include logs for the **Needs Confirmation**, **Applied**, and
-> **Errored** events.
-
-You can toggle notifications for each of these events on the "Integrations" tab
-of an environment.
diff --git a/website/source/docs/enterprise/runs/scheduling-runs.html.md b/website/source/docs/enterprise/runs/scheduling-runs.html.md
deleted file mode 100755
index 928801232..000000000
--- a/website/source/docs/enterprise/runs/scheduling-runs.html.md
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "enterprise"
-page_title: "Scheduling - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-schedule"
-description: |-
- Schedule periodic plan runs in Terraform.
----
-
-
-# Schedule Periodic Plan Runs
-
--> This is an unreleased beta feature. Please
-contact support if you are interested
-in helping us test this feature.
-
-Terraform can automatically run a plan against your infrastructure on a
-specified schedule. This option is disabled by default and can be enabled by an
-organization owner on a per-environment basis.
-
-On the specified interval, a plan can be run for you, determining any
-changes and sending the appropriate notifications.
-
-When used with [automatic applies](/docs/enterprise/runs/automatic-applies.html), this feature can help converge
-changes to infrastructure without human input.
-
-Runs will not be queued while another plan or apply is in progress, or if the
-environment has been manually locked. See
-[Environment Locking](/docs/enterprise/runs#environment-locking) for more
- information.
-
-## Enabling Periodic Plans
-
-To enable periodic plans for an environment, visit the environment settings page
-and select the desired interval and click the save button to persist the
-changes. An initial plan may immediately run, depending on the state of your
-environment, and then will automatically plan at the specified interval.
-
-If you have manually run a plan separately, a new plan will not be queued until
-the allotted time after the manual plan ran. This means that the platform simply
-ensures that a plan has been executed at the specified schedule.
diff --git a/website/source/docs/enterprise/runs/starting.html.md b/website/source/docs/enterprise/runs/starting.html.md
deleted file mode 100755
index 21e08aaf9..000000000
--- a/website/source/docs/enterprise/runs/starting.html.md
+++ /dev/null
@@ -1,117 +0,0 @@
----
-layout: "enterprise"
-page_title: "Starting - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-starting"
-description: |-
- How to start runs in Terraform Enterprise.
----
-
-
-# Starting Terraform Runs
-
-There are a variety of ways to queue a Terraform run in Terraform Enterprise. In addition to
-`terraform push`, you can connect your environment
-to GitHub and queue runs based on new commits. You can
-also intelligently queue new runs when linked artifacts are uploaded or changed.
-Remember from the [previous section about Terraform runs](/docs/enterprise/runs)
-that it is safe to trigger many plans without consequence since Terraform plans
-do not change infrastructure.
-
-
-## Terraform Push
-
-Terraform `push` is a [Terraform command](https://terraform.io/docs/commands/push.html)
-that packages and uploads a set of Terraform configuration and directory to the platform. This then creates a run
-which performs `terraform plan` and `terraform apply` against the uploaded
-configuration.
-
-The directory is included in order to run any associated provisioners,
-that might use local files. For example, a remote-exec provisioner
-that executes a shell script.
-
-By default, everything in your directory is uploaded as part of the push.
-
-However, it's not always the case that the entire directory should be uploaded. Often,
-temporary or cache directories and files like `.git`, `.tmp` will be included by default, which
-can cause failures at certain sizes and should be avoided. You can
-specify [exclusions](https://terraform.io/docs/commands/push.html) to avoid this situation.
-
-Terraform also allows for a [VCS option](https://terraform.io/docs/commands/push.html#_vcs_true)
-that will detect your VCS (if there is one) and only upload the files that are tracked by the VCS. This is
-useful for automatically excluding ignored files. In a VCS like git, this
-basically does a `git ls-files`.
-
-
-## GitHub Webhooks
-
-Optionally, GitHub can be used to import Terraform configuration. When used
-within an organization, this can be extremely valuable for keeping differences
-in environments and last mile changes from occurring before an upload.
-
-After you have [connected your GitHub account to Terraform Enterprise](/docs/enterprise/vcs/github.html),
-you can connect your environment to the target
-GitHub repository. The GitHub repository will be linked to the Terraform Enterprise
-configuration, and GitHub will start sending webhooks. Certain
-GitHub webhook events, detailed below, will cause the repository to be
-automatically ingressed into Terraform and stored, along with references to the
-GitHub commits and authorship information.
-
-Currently, an environment must already exist to be connected to GitHub. You can
-create the environment with `terraform push`, detailed above, and then link it
-to GitHub.
-
-Each ingress will trigger a Terraform plan. If you have auto-apply enabled then
-the plan will also be applied.
-
-You can disable an ingress by adding the text `[atlas skip]` or `[ci skip]` to
-your commit message.
-
-Supported GitHub webhook events:
-
-- pull_request (on by default)
- - ingress when opened or reopened
- - ingress when synchronized (new commits are pushed to the branch)
-- push (on by default)
- - ingress when a tag is created
- - ingress when the default branch is updated
- - note: the default branch is either configured on your configuration's
- integrations tab, or if that is blank it is the GitHub
- repository's default branch
-- create (off by default)
- - ingress when a tag is created
- - note: if you want to only run on tag creation, turn on create events and
- turn off push events
-
-## Artifact Uploads
-
-Upon successful completion of a Terraform run, the remote state is parsed and
-any [artifacts](/docs/enterprise/artifacts/artifact-provider.html) are detected that
-were referenced. When new versions of those referenced artifacts are uploaded, you have the option to automatically queue a new Terraform run.
-
-For example, consider the following Terraform configuration which references an
-artifact named "worker":
-
-```hcl
-resource "aws_instance" "worker" {
- ami = "${atlas_artifact.worker.metadata_full.region-us-east-1}"
- instance_type = "m1.small"
-}
-```
-
-When a new version of the artifact "worker" is uploaded either manually
-or as the output of a [Packer build](/docs/enterprise/packer/builds/starting.html), a Terraform plan can be automatically triggered with this new artifact version.
-You can enable this feature on a per-environment basis from the
-environment settings page.
-
-Combined with
-[Terraform auto apply](/docs/enterprise/runs/automatic-applies.html), you can
-continuously deliver infrastructure using Terraform and Terraform Enterprise.
-
-## Terraform Plugins
-
-If you are using a custom [Terraform Plugin](https://www.terraform.io/docs/plugins/index.html)
-binary for a provider or provisioner that's not currently in a released
-version of Terraform, you can still use this in Terraform Enterprise.
-
-All you need to do is include a Linux AMD64 binary for the plugin in the
-directory in which Terraform commands are run from; it will then be used next time you `terraform push` or ingress from GitHub.
diff --git a/website/source/docs/enterprise/runs/variables-and-configuration.html.md b/website/source/docs/enterprise/runs/variables-and-configuration.html.md
deleted file mode 100755
index 7f7711fc1..000000000
--- a/website/source/docs/enterprise/runs/variables-and-configuration.html.md
+++ /dev/null
@@ -1,196 +0,0 @@
----
-layout: "enterprise"
-page_title: "Variables and Configuration - Runs - Terraform Enterprise"
-sidebar_current: "docs-enterprise-runs-variables"
-description: |-
- How to configure runs and their variables.
----
-
-# Terraform Variables and Configuration
-
-There are several ways to configure Terraform runs:
-
-1. Terraform variables
-2. Environment variables
-3. Personal Environment and Personal Organization variables
-
-You can add, edit, and delete all Terraform, Environment, and Personal
-Environment variables from the "Variables" page on your environment:
-
-![Terraform Enterprise environment variable configuration](docs/tfe-variables.png)
-
-Personal Organization variables can be managed in your Account Settings under
-"Organization Variables":
-
-![Terraform Enterprise personal organization variables](docs/tfe-organization-variables.png)
-
-## Variable types
-
-### Terraform Variables
-
-Terraform variables are first-class configuration in Terraform. They define the
-parameterization of Terraform configurations and are important for sharing and
-removal of sensitive secrets from version control.
-
-Variables are sent with the `terraform push` command. Any variables in your local
-`.tfvars` files are securely uploaded. Once variables are uploaded, Terraform
-will prefer the stored variables over any changes you make locally. Please refer
-to the [Terraform push documentation](https://www.terraform.io/docs/commands/push.html)
-for more information.
-
-You can also add, edit, and delete variables. To update Terraform variables,
-visit the "variables" page on your environment.
-
-The maximum size for the value of Terraform variables is `256kb`.
-
-For detailed information about Terraform variables, please read the
-[Terraform variables](https://terraform.io/docs/configuration/variables.html)
-section of the Terraform documentation.
-
-### Environment Variables
-
-Environment variables are injected into the virtual environment that Terraform
-executes in during the `plan` and `apply` phases.
-
-You can add, edit, and delete environment variables from the "variables" page
-on your environment.
-
-Additionally, the following environment variables are automatically injected by
-Terraform Enterprise. All injected environment variables will be prefixed with `ATLAS_`
-
-- `ATLAS_TOKEN` - This is a unique, per-run token that expires at the end of
- run execution (e.g. `"abcd.atlasv1.ghjkl..."`).
-
-- `ATLAS_RUN_ID` - This is a unique identifier for this run (e.g. `"33"`).
-
-- `ATLAS_CONFIGURATION_NAME` - This is the name of the configuration used in
- this run. Unless you have configured it differently, this will also be the
- name of the environment (e.g `"production"`).
-
-- `ATLAS_CONFIGURATION_SLUG` - This is the full slug of the configuration used
- in this run. Unless you have configured it differently, this will also be the
- name of the environment (e.g. `"company/production"`).
-
-- `ATLAS_CONFIGURATION_VERSION` - This is the unique, auto-incrementing version
- for the Terraform configuration (e.g. `"34"`).
-
-- `ATLAS_CONFIGURATION_VERSION_GITHUB_BRANCH` - This is the name of the branch
- that the associated Terraform configuration version was ingressed from
- (e.g. `master`).
-
-- `ATLAS_CONFIGURATION_VERSION_GITHUB_COMMIT_SHA` - This is the full commit hash
- of the commit that the associated Terraform configuration version was
- ingressed from (e.g. `"abcd1234..."`).
-
-- `ATLAS_CONFIGURATION_VERSION_GITHUB_TAG` - This is the name of the tag
- that the associated Terraform configuration version was ingressed from
- (e.g. `"v0.1.0"`).
-
-For any of the `GITHUB_` attributes, the value of the environment variable will
-be the empty string (`""`) if the resource is not connected to GitHub or if the
-resource was created outside of GitHub (like using `terraform push`).
-
-### Personal Environment and Personal Organization Variables
-
-Personal variables can be created at the Environment or Organization level and
-are private and scoped to the user that created them. Personal Environment
-variables are scoped to just the environment they are attached to, while Personal
-Organization variables are applied across any environment a user triggers a
-Terraform run in. Just like shared Environment variables, they are injected into
-the virtual environment during the `plan` and `apply` phases.
-
-Both Personal Environment and Personal Organization variables can be used to
-override Environment variables on a per-user basis.
-
-## Variable Hierarchy
-
-It is possible to create the same variable in multiple places for more granular
-control. Variables are applied in the following order from least to most
-precedence:
-
-1. Environment
-2. Personal Organization
-3. Personal Environment
-
-Here's an example:
-
-* For the `SlothCorp/petting_zoo` environment, User 1 creates
-an Environment variable called `SECRET_GATE_ACCESS_KEY` and sets the value to
-`"orange-turtleneck"`
-* User 2 adds a Personal Environment variable for
-`SECRET_GATE_ACCESS_KEY` and sets the value to `"pink-overalls"`
-* When User 2 submits a `plan` or `apply`, the `SECRET_GATE_ACCESS_KEY`
-will use `"pink-overalls"`
-* When User 1, or any other user, submits a `plan` or `apply`, the
-`SECRET_GATE_ACCESS_KEY` will use `"orange-turtleneck"`
-
-## Managing Secret Multi-Line Files
-
-Terraform Enterprise has the ability to store multi-line files as variables. The
-recommended way to manage your secret or sensitive multi-line files (private key,
-SSL cert, SSL private key, CA, etc.) is to add them as
-[Terraform Variables](#terraform-variables) or
-[Environment Variables](#environment-variables).
-
-Just like secret strings, it is recommended that you never check in these
-multi-line secret files to version control by following the below steps.
-
-Set the [variables](https://www.terraform.io/docs/configuration/variables.html)
-in your Terraform template that resources utilizing the secret file will
-reference:
-
-```hcl
-variable "private_key" {}
-
-resource "aws_instance" "example" {
- # ...
-
- provisioner "remote-exec" {
- connection {
- host = "${self.private_ip}"
- private_key = "${var.private_key}"
- }
-
- # ...
- }
-}
-```
-
-`terraform push` any "Terraform Variables":
-
- $ terraform push -name $ATLAS_USERNAME/example -var "private_key=$MY_PRIVATE_KEY"
-
-`terraform push` any "Environment Variables":
-
- $ TF_VAR_private_key=$MY_PRIVATE_KEY terraform push -name $ATLAS_USERNAME/example
-
-Alternatively, you can add or update variables manually by going to the
-"Variables" section of your Environment and pasting the contents of the file in
-as the value.
-
-Now, any resource that consumes that variable will have access to the variable value, without having to check the file into version control. If you want to run Terraform locally, that file will still need to be passed in as a variable in the CLI. View the [Terraform Variable Documentation](https://www.terraform.io/docs/configuration/variables.html) for more info on how to accomplish this.
-
-A few things to note...
-
-The `.tfvars` file does not support multi-line files. You can still use
-`.tfvars` to define variables, however, you will not be able to actually set the
-variable in `.tfvars` with the multi-line file contents like you would a
-variable in a `.tf` file.
-
-If you are running Terraform locally, you can pass in the variables at the
-command line:
-
- $ terraform apply -var "private_key=$MY_PRIVATE_KEY"
- $ TF_VAR_private_key=$MY_PRIVATE_KEY terraform apply
-
-You can update variables locally by using the `-overwrite` flag with your `terraform push` command:
-
- $ terraform push -name $ATLAS_USERNAME/example -var "private_key=$MY_PRIVATE_KEY" -overwrite=private_key
- $ TF_VAR_private_key=$MY_PRIVATE_KEY terraform push -name $ATLAS_USERNAME/example -overwrite=private_key
-
-## Notes on Security
-
-Terraform variables and environment variables are encrypted using
-[Vault](https://vaultproject.io) and closely guarded and audited. If you have
-questions or concerns about the safety of your configuration, please contact
-our security team at [security@hashicorp.com](mailto:security@hashicorp.com).
diff --git a/website/source/docs/enterprise/state/collaborating.html.md b/website/source/docs/enterprise/state/collaborating.html.md
deleted file mode 100755
index 417887e5a..000000000
--- a/website/source/docs/enterprise/state/collaborating.html.md
+++ /dev/null
@@ -1,22 +0,0 @@
----
-layout: "enterprise"
-page_title: "Collaborating - State - Terraform Enterprise"
-sidebar_current: "docs-enterprise-state-collaborating"
-description: |-
- How to collaborate on states.
----
-
-# Collaborating on Terraform Remote State
-
-Terraform Enterprise is one of a few options to store [remote state](/docs/state/remote.html).
-
-Remote state gives you the ability to version and collaborate on Terraform
-changes. It stores information about the changes Terraform makes based on
-configuration.
-
-In order to collaborate safely on remote state, we recommend
-[creating an organization](/docs/enterprise/organizations/create.html) to
-manage teams of users.
-
-Then, following a [Terraform Enterprise Run](/docs/enterprise/runs) or [`apply`](/docs/commands/apply.html)
-you can view state versions in the `States` list of the environment.
diff --git a/website/source/docs/enterprise/state/index.html.md b/website/source/docs/enterprise/state/index.html.md
deleted file mode 100755
index 3b8a0589b..000000000
--- a/website/source/docs/enterprise/state/index.html.md
+++ /dev/null
@@ -1,24 +0,0 @@
----
-layout: "enterprise"
-page_title: "State - Terraform Enterprise"
-sidebar_current: "docs-enterprise-state"
-description: |-
- Terraform stores the state of your managed infrastructure from the last time Terraform was run. This section is about states.
----
-
-# State
-
-Terraform Enterprise stores the state of your managed infrastructure from the
-last time Terraform was run. The state is stored remotely, which works better in a
-team environment, allowing you to store, version and collaborate on state.
-
-Remote state gives you more than just easier version control and safer storage.
-It also allows you to delegate the outputs to other teams. This allows your
-infrastructure to be more easily broken down into components that multiple teams
-can access.
-
-Remote state is automatically updated when you run [`apply`](/docs/commands/apply.html)
-locally. It is also updated when an `apply` is executed in a [Terraform Enterprise
-Run](/docs/enterprise/runs/index.html).
-
-Read [more about remote state](/docs/state/remote.html).
diff --git a/website/source/docs/enterprise/state/pushing.html.md b/website/source/docs/enterprise/state/pushing.html.md
deleted file mode 100644
index ad058d144..000000000
--- a/website/source/docs/enterprise/state/pushing.html.md
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: "enterprise"
-page_title: "Pushing - State - Terraform Enterprise"
-sidebar_current: "docs-enterprise-state-pushing"
-description: |-
- Pushing remote states.
----
-
-# Pushing Terraform Remote State to Terraform Enterprise
-
-Terraform Enterprise is one of a few options to store [remote state](/docs/enterprise/state).
-
-Remote state gives you the ability to version and collaborate on Terraform
-changes. It stores information about the changes Terraform makes based on
-configuration.
-
-To use Terraform Enterprise to store remote state, you'll first need to have the
-`ATLAS_TOKEN` environment variable set and run the following command.
-
-**NOTE:** `terraform remote config` command has been deprecated in 0.9.X. Remote configuration is now managed as a [backend configuration](/docs/backends/config.html).
-
-```shell
-$ terraform remote config \
- -backend-config="name=$USERNAME/product"
-```
\ No newline at end of file
diff --git a/website/source/docs/enterprise/state/resolving-conflicts.html.md b/website/source/docs/enterprise/state/resolving-conflicts.html.md
deleted file mode 100755
index e31a7fafa..000000000
--- a/website/source/docs/enterprise/state/resolving-conflicts.html.md
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "enterprise"
-page_title: "Resolving Conflicts - State - Terraform Enterprise"
-sidebar_current: "docs-enterprise-state-resolving"
-description: |-
- Resolving conflicts with remote states.
----
-
-# Resolving Conflicts in Remote States
-
-Resolving state conflicts can be time consuming and error prone, so it's
-important to approach it carefully.
-
-There are several tools provided by Terraform Enterprise to help resolve
-conflicts and fix remote state issues. First, you can navigate between state
-versions in the changes view of your environment (after toggling on the remote
-state checkbox) and view plain-text differences between versions.
-
-This allows you to pinpoint where things may have gone wrong and make an educated
-decision about resolving the conflict.
-
-### Rolling Back to a Specific State Version
-
-The rollback feature allows you to choose a new version to set as the "Head"
-version of the state. Rolling back to a version means it will then return that
-state upon request from a client. It will not increment the serial in the state,
-but perform a hard rollback to the exact version of the state provided.
-
-This allows you to reset the state to an older version, essentially forgetting
-changes made in versions after that point.
-
-To roll back to a specific version, navigate to it in the changes view and use
-the rollback link. You'll need to confirm the version number to perform the
-operation.
-
-### Using Terraform Locally
-
-Another way to resolve remote state conflicts is by manual intervention of the
-state file.
-
-Use the [`state pull`](/docs/commands/state/pull.html) subcommand to pull the
-remote state into a local state file.
-
-```shell
-$ terraform state pull > example.tfstate
-```
-
-Once a conflict has been resolved locally by editing the state file, the serial
-can be incremented past the current version and pushed with the
-[`state push`](/docs/commands/state/push.html) subcommand:
-
-```shell
-$ terraform state push example.tfstate
-```
-
-This will upload the manually resolved state and set it as the head version.
diff --git a/website/source/docs/enterprise/support.html.md b/website/source/docs/enterprise/support.html.md
deleted file mode 100755
index 60c8f1c14..000000000
--- a/website/source/docs/enterprise/support.html.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "enterprise"
-page_title: "Support - Terraform Enterprise"
-sidebar_current: "docs-enterprise-support"
-description: |-
- All users of Terraform Enterprise are urged to email feedback, questions or requests to the HashiCorp team.
----
-
-# Contacting Support
-
-All users of Terraform Enterprise are urged to email feedback, questions or
-requests to the HashiCorp team.
-
-### Free Support
-
-We do not currently publish support SLAs for free accounts, but endeavor to
-respond as quickly as possible. We respond to most requests within less than 24
-hours.
-
-## HashiCorp Tools Support
-
-It's often the case that Terraform Enterprise questions or feedback relates to
-the HashiCorp tooling. We encourage all Terraform Enterprise users to search for
-related issues and problems in the open source repositories and mailing lists
-prior to contacting us to help make our support more efficient and to help
-resolve problems faster.
-
-Visit the updating tools section for a list of our tools and their project
-websites.
-
-## Documentation Feedback
-
-Due to the dynamic nature of Terraform Enterprise and the broad set of features
-it provides, there may be information lacking in the documentation.
-
-In this case, we appreciate any feedback to be emailed to us so
-we can make improvements. Please email feedback to
-support@hashicorp.com.
diff --git a/website/source/docs/enterprise/user-accounts/authentication.html.md b/website/source/docs/enterprise/user-accounts/authentication.html.md
deleted file mode 100755
index fd73c6211..000000000
--- a/website/source/docs/enterprise/user-accounts/authentication.html.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "enterprise"
-page_title: "Authentication - Accounts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-accounts-authentication"
-description: |-
- Terraform Enterprise requires a username and password to sign up and login. However, there are several ways to authenticate with your account.
----
-
-# Authentication
-
-Terraform Enterprise requires a username and password to sign up and login.
-However, there are several ways to authenticate with your account.
-
-### Authentication Tokens
-
-Authentication tokens are keys used to access your account via tools or over the
-various APIs used in Terraform Enterprise.
-
-You can create new tokens in the token section of your account settings. It's
-important to keep tokens secure, as they are essentially a password and can be
-used to access your account or resources. Additionally, token authentication
-bypasses two factor authentication.
-
-### Authenticating Tools
-
-All HashiCorp tools look for the `ATLAS_TOKEN` environment variable:
-
-```shell
-$ export ATLAS_TOKEN=TOKEN
-```
-
-This will automatically authenticate all requests against this token. This is
-the recommended way to authenticate with our various tools. Care should be given
-to how this token is stored, as it is as good as a password.
-
-### Two Factor Authentication
-
-You can optionally enable Two Factor authentication, requiring an SMS or TOTP
-one-time code every time you log in, after entering your username and password.
-
-You can enable Two Factor authentication in the security section of your account
-settings.
-
-Be sure to save the generated recovery codes. Each backup code can be used once
-to sign in if you do not have access to your two-factor authentication device.
-
-### Sudo Mode
-
-When accessing certain admin-level pages (adjusting your user profile, for
-example), you may notice that you're prompted for your password, even though
-you're already logged in. This is by design, and aims to help protect you
-if your screen is unlocked and unattended.
-
-### Session Management
-
-You can see a list of your active sessions on your security settings page. From
-here, you can revoke sessions, in case you have lost access to a machine from
-which you were accessing your account.
diff --git a/website/source/docs/enterprise/user-accounts/index.html.md b/website/source/docs/enterprise/user-accounts/index.html.md
deleted file mode 100755
index 5c4942a27..000000000
--- a/website/source/docs/enterprise/user-accounts/index.html.md
+++ /dev/null
@@ -1,13 +0,0 @@
----
-layout: "enterprise"
-page_title: "Accounts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-accounts"
-description: |-
- Users are the main identity system in Terraform Enterprise.
----
-
-# User Accounts
-
-Users are the main identity system in Terraform Enterprise. A user can be a
-member of multiple [organizations](/docs/enterprise/organizations/index.html),
-as well as individually collaborate on various resources.
diff --git a/website/source/docs/enterprise/user-accounts/recovery.html.md b/website/source/docs/enterprise/user-accounts/recovery.html.md
deleted file mode 100755
index 3e2243713..000000000
--- a/website/source/docs/enterprise/user-accounts/recovery.html.md
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: "enterprise"
-page_title: "Recovery - Accounts - Terraform Enterprise"
-sidebar_current: "docs-enterprise-accounts-recovery"
-description: |-
- If you have lost access to your account, use the reset password form to send yourself a link to reset your password.
----
-
-# Account Recovery
-
-If you have lost access to your Terraform Enterprise account, use the reset
-password form on the login page to send yourself a link to reset your password.
-
-If an email is unknown, [contact us](mailto:support@hashicorp.com) for further
-help.
diff --git a/website/source/docs/enterprise/vcs/bitbucket.html.md b/website/source/docs/enterprise/vcs/bitbucket.html.md
deleted file mode 100644
index 75f96794b..000000000
--- a/website/source/docs/enterprise/vcs/bitbucket.html.md
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "enterprise"
-page_title: "Bitbucket Cloud - VCS Integrations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-vcs-bitbucket-cloud"
-description: |-
- Bitbucket Cloud repositories can be integrated with Terraform Enterprise by using push command.
----
-# Bitbucket Cloud
-
-Bitbucket Cloud can be used to import Terraform configuration, automatically
-queuing runs when changes are merged into a repository's default branch.
-Additionally, plans are run when a pull request is created or updated. Terraform
-Enterprise will update the pull request with the result of the Terraform plan
-providing quick feedback on proposed changes.
-
-## Registering an OAuth Application & Client
-
-### Creating and Updating a Bitbucket Cloud OAuth Application
-
-You will need to register Terraform Enterprise as an OAuth Application within your Bitbucket Cloud account. Proceed to https://bitbucket.org/account/user/your-username/oauth-consumers/new. Fill out the required information and set the Redirect URI to a placeholder (ie: http://example.com), as you will need to register the Bitbucket Client with Terraform Enterprise prior to receiving this value. Check all of the permission fields that apply to you, and click Save
-
-Upon saving the application, you will be redirected to https://bitbucket.org/account/user/your-username/api. Scroll down to OAuth Consumers and click on the application you just created. Copy the Key and Secret. Leave this tab open in your browser as you will need to return to it in a moment.
-
-### Creating a Terraform Enterprise OAuth Client
-
-In a new tab, navigate to https://atlas.hashicorp.com/settings and, in the left-side panel, select the organization that you’d like to administer your Bitbucket connection, then click on “configuration” in the left-side panel.
-
-Within the “Add OAuthClient” pane, select Bitbucket Cloud and fill in your application key and secret. In the base url field, enter the root url of your Bitbucket instance (i.e. https://bitbucket.org). In the API url field, enter the base api url (i.e. https://api.bitbucket.org/2.0). Create the OAuth client.
-
-Once you have created your client, you will be redirected back to the configurations page for your chosen organization. On that page, find the “OAuth Clients” pane and copy the Callback URL for your Bitbucket Cloud OAuth Client. In the open Bitbucket tab, select the Terraform Enterprise OAuth Application and click edit. Enter the Callback URL you just copied in the field labeled Redirect URI. Save the application.
-
-Your OAuth Client should now be enabled for your Organization to use within Terraform Enterprise.
-
-## Using Terraform Enterprise with Bitbucket Cloud
-
-There are two ways to connect your preferred VCS Host to Terraform Enterprise.
-You can generate an OAuth token both at the user and organization level.
-
-### Linking your Terraform Enterprise Organization
-
-Return to the settings page for the organization in which you created the OAuth Client (https://atlas.hashicorp.com/settings/organizations/your-organization/configuration). Find the section entitled Organization Connections to OAuth Client and click connect beneath your Bitbucket Cloud integration. You will be briefly redirected to Bitbucket in order to authenticate the client.
-
-Once you are redirected back to Terraform Enterprise, you should see that the token was created with a unique identifier. If you don’t, check the values in your OAuth Client and make sure they match exactly with the values associated with your Bitbucket OAuth Application. There is also an option to destroy the token and disconnect the organization from your Bitbucket installation.
-
-You are now ready to use your organization's token to manage builds and configurations within Terraform Enterprise.
-
-### Linking your Terraform Enterprise User Account
-
-Navigate to https://atlas.hashicorp.com/settings/connections and click on “Connect Bitbucket Cloud to Atlas”. You will briefly be redirected to Bitbucket in order to authenticate your OAuth Client. Once redirected back to Terraform Enterprise, you should see a green flash banner with the message: "Successfully Linked to Bitbucket".
-
-You are now ready to use your personal token to manage builds and configurations within Terraform Enterprise.
-
-## Connecting Configurations
-
-Once you have linked a Bitbucket installation to your account or organization,
-you are ready to begin creating Packer Builds and Terraform Environments linked
-to your desired Bitbucket Cloud repository.
-
-Terraform Enterprise environments are linked to individual Bitbucket Cloud repositories.
-However, a single Bitbucket Cloud repository can be linked to multiple environments
-allowing a single set of Terraform configuration to be used across multiple
-environments.
-
-Environments can be linked when they're initially created using the New
-Environment process. Existing environments can be linked by setting Bitbucket Cloud
-details in their **Integrations**.
-
-To link a Terraform Enterprise environment to a Bitbucket Cloud repository, you need
-three pieces of information:
-
-- **Bitbucket Cloud repository** - The location of the repository being imported in the
-format _username/repository_.
-
-- **Bitbucket Cloud branch** - The branch from which to ingress new versions. This
-defaults to the value Bitbucket Cloud provides as the default branch for this repository.
-
-- **Path to directory of Terraform files** - The repository's subdirectory that
-contains its terraform files. This defaults to the root of the repository.
-
-**Note**: Users creating, updating, or deleting webhooks via the API must have `owner` or `admin` permissions enabled on the target Bitbucket Cloud repository. To update user permissions on the target repository the repository owner can visit: https://bitbucket.org/your-username/your-repository/admin/access
-
-### Connecting a Bitbucket Cloud Repository to a Terraform Environment
-
-Navigate to https://atlas.hashicorp.com/configurations/import and select Link to Bitbucket Cloud. A menu will appear asking you to name the environment. Then use the autocomplete field for repository and select the repository for which you'd like to create a webhook & environment. If you do not see the repository you would like to connect to in the drop down, manually enter it using the format: username/repository. If necessary, fill out information about the VCS branch to pull from as well as the directory where the Terraform files live within the repository. Click Create and Continue.
-
-Upon success, you will be redirected to the environment's runs page (https://atlas.hashicorp.com/terraform/your-organization/environments/your-environment/changes/runs). A message will display letting you know that the repository is ingressing from Bitbucket and once finished you will be able to Queue, Run, & Apply a Terraform Plan. Depending on your webhook settings, changes will be triggered through git events on the specified branch.
-
-The events currently supported are repository and branch push, pull request, and merge.
-
-### Connecting a Bitbucket Cloud Repository to a Packer Build Configuration
-
-Navigate to https://atlas.hashicorp.com/builds/new and select the organization for which you'd like to create a build configuration. Name your build & select Connect build configuration to a Git Repository. A form will appear asking you to select your Git Host. Select Bitbucket Cloud.
-
-Choose the repository for which you'd like to create a webhook. Fill out any other information in the form such as preferred branch to build from (your default branch will be selected should this field be left blank), Packer directory, and Packer Template.
-
-Upon clicking Create you will be redirected to the build configuration (https://atlas.hashicorp.com/packer/your-organization/build-configurations/your-build-configuration). On this page, you will have the opportunity to make any changes to your packer template, push changes via the CLI, or manually queue a Packer build.
-
-Depending on your webhook settings, changes will be triggered through git events on the specified branch. The events currently supported are repository and branch push, pull request, and merge.
diff --git a/website/source/docs/enterprise/vcs/git.html.md b/website/source/docs/enterprise/vcs/git.html.md
deleted file mode 100755
index 1a348b425..000000000
--- a/website/source/docs/enterprise/vcs/git.html.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "enterprise"
-page_title: "Git - VCS Integrations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-vcs-git-"
-description: |-
- Git repositories can be integrated with Terraform Enterprise by using push command.
----
-
-# Git Integration
-
-Git repositories can be integrated with Terraform Enterprise by using
-[`terraform push`](/docs/commands/push.html) to import Terraform configuration
-when changes are committed. When Terraform configuration is imported using
-`terraform push` a plan is automatically queued.
-
--> This integration is for Git repositories **not** hosted on GitHub. For GitHub, please see the GitHub documentation instead.
-
-## Setup
-
-Terraform configuration can be manually imported by running `terraform push`
-like below:
-
-```shell
-$ terraform push -name=$USERNAME/ENV_NAME
-```
-
-A better option than having to manually run `terraform push` is to run it
-using a git commit hook. A client-side `pre-push` hook is suitable and will
-push your Terraform configuration when you push local changes to your Git
-server.
-
-### Client-side Commit Hook
-
-The script below will execute `terraform push` when you push local changes to
-your Git server. Place the script at `.git/pre-push` in your local Git
-repository, set the necessary variables, and ensure the script is executable.
-
-```shell
-#!/bin/bash
-#
-# An example hook script to push Terraform configuration to Terraform Enterprise.
-#
-# Set the following variables for your project:
-# - ENV_NAME - your environment name (e.g. org/env)
-# - TERRAFORM_DIR - the local directory to push
-# - DEFAULT_BRANCH - the branch to push. Other branches will be ignored.
-
-ENV_NAME="YOUR_ORG/YOUR_ENV"
-TERRAFORM_DIR="terraform"
-DEFAULT_BRANCH=""
-
-if [[ -z "$ENV_NAME" || -z "$TERRAFORM_DIR" || -z "$DEFAULT_BRANCH" ]]; then
- echo 'pre-push hook: One or more variables are undefined. Canceling push.'
- exit 1
-fi
-
-current_branch=$(git symbolic-ref HEAD | sed -e 's,.*/\(.*\),\1,')
-
-if [ "$current_branch" == "$DEFAULT_BRANCH" ]; then
- echo "pre-push hook: Pushing branch [$current_branch] to environment [$ENV_NAME]."
- terraform push -name="$ENV_NAME" $TERRAFORM_DIR
-else
- echo "pre-push hook: NOT pushing branch [$current_branch] to environment [$ENV_NAME]."
-fi
-
-```
diff --git a/website/source/docs/enterprise/vcs/github.html.md b/website/source/docs/enterprise/vcs/github.html.md
deleted file mode 100755
index 70a19b686..000000000
--- a/website/source/docs/enterprise/vcs/github.html.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "enterprise"
-page_title: "GitHub - VCS Integrations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-vcs-github"
-description: |-
- GitHub repositories can be integrated with Terraform Enterprise by using push command.
----
-
-# GitHub Integration
-
-GitHub can be used to import Terraform configuration, automatically queuing runs
-when changes are merged into a repository's default branch. Additionally, plans
-are run when a pull request is created or updated. Terraform Enterprise will
-update the pull request with the result of the Terraform plan providing quick
-feedback on proposed changes.
-
-## Setup
-
-Terraform Enterprise environments are linked to individual GitHub repositories.
-However, a single GitHub repository can be linked to multiple environments
-allowing a single set of Terraform configuration to be used across multiple
-environments.
-
-Environments can be linked when they're initially created using the New
-Environment process. Existing environments can be linked by setting GitHub
-details in their **Integrations**.
-
-To link a Terraform Enterprise environment to a GitHub repository, you need
-three pieces of information:
-
-- **GitHub repository** - The location of the repository being imported in the
-format _username/repository_.
-
-- **GitHub branch** - The branch from which to ingress new versions. This
-defaults to the value GitHub provides as the default branch for this repository.
-
-- **Path to directory of Terraform files** - The repository's subdirectory that
-contains its terraform files. This defaults to the root of the repository.
diff --git a/website/source/docs/enterprise/vcs/gitlab.html.md b/website/source/docs/enterprise/vcs/gitlab.html.md
deleted file mode 100644
index deaa80892..000000000
--- a/website/source/docs/enterprise/vcs/gitlab.html.md
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: "enterprise"
-page_title: "GitLab - VCS Integrations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-vcs-gitlab"
-description: |-
- GitLab.com, GitLab Community, and GitLab Enterprise repositories can be integrated with Terraform Enterprise by using push command.
----
-
-# GitLab.com, GitLab Community, & GitLab Enterprise
-
-GitLab can be used to import Terraform configuration, automatically
-queuing runs when changes are merged into a repository's default branch.
-Additionally, plans are run when a pull request is created or updated. Terraform
-Enterprise will update the pull request with the result of the Terraform plan
-providing quick feedback on proposed changes.
-
-## Registering an OAuth Application & Client
-
-### Creating and Updating a GitLab OAuth Application
-
-You will need to register Terraform Enterprise as an OAuth Application within your GitLab account.
-
-Proceed to https://gitlab.com/profile/applications. Fill out the required information and set the `Redirect URI` to a placeholder (i.e. http://example.com), as you will need to register the GitLab OAuth Client with Terraform Enterprise before it can give you this value.
-
-When you save the form, you will be redirected to the OAuth Application view. Copy your Application Key and Secret as you will need them to connect GitLab to Terraform Enterprise.
-
-
-### Creating a Terraform Enterprise OAuth Client
-
-In a new tab, navigate to https://atlas.hashicorp.com/settings and in the left-side panel, click on the organization that you’d like to administer your GitLab connection, then click on “Configuration” in the left-side panel.
-
-In the “Add OAuthClient” pane, select your GitLab installation type (GitLab.com, GitLab Community Edition, or GitLab Enterprise) and fill in your application key and secret. In the base URL field, enter the root URL of your GitLab instance (i.e. https://gitlab.com for GitLab.com). In the API URL field, enter the base API URL (i.e. https://gitlab.com/api/v3 for GitLab.com). Create the OAuth client.
-
-Once you have created your client, you will be redirected back to the configurations page for your chosen organization. On that page, find the “OAuth Clients” pane and copy the `Callback URL` for your GitLab OAuth Client. In a new tab, navigate back to https://gitlab.com/profile/applications select the terraform-enterprise OAuth Application and click edit. Enter the `Callback URL` you just copied in the field labeled `Redirect URI`. Save the application.
-
-Your OAuth Client should now be enabled for your Organization to use within Terraform Enterprise.
-
-## Using Terraform Enterprise with GitLab
-
-There are two ways to connect your preferred VCS Host to Terraform Enterprise. You can generate an OAuth token both at the user and organization level.
-
-### Linking your Terraform Enterprise Organization
-
-Return to the settings page for the organization in which you created the OAuth Client (https://atlas.hashicorp.com/settings/organizations/your-organization/configuration). Find the section entitled `Organization Connections to OAuth Client` and click connect beneath your GitLab installation. You will be briefly redirected to GitLab in order to authenticate the client. Once you are redirected back to Terraform Enterprise, you should see that the token was created with a unique identifier. There is also an option to destroy the token and disconnect the organization from your preferred GitLab installation. You are now ready to use your organization's token to manage builds and configurations within Terraform Enterprise.
-
-### Linking your Terraform Enterprise User Account
-
-Navigate to https://atlas.hashicorp.com/settings/connections and click on “Connect GitLab.com to Atlas”. You will briefly be redirected to GitLab in order to authenticate your OAuth Client. Once redirected back to Terraform Enterprise, you should see a green flash banner with the message: "Successfully Linked to GitLab".
-
-## Connecting Configurations
-
-Once you have linked a GitLab installation to your account or organization,
-you are ready to begin creating Packer Builds and Terraform Environments linked
-to your desired GitLab repository.
-
-Terraform Enterprise environments are linked to individual GitLab repositories.
-However, a single GitLab repository can be linked to multiple environments
-allowing a single set of Terraform configuration to be used across multiple
-environments.
-
-Environments can be linked when they're initially created using the New
-Environment process. Existing environments can be linked by setting GitLab
-details in their **Integrations**.
-
-To link a Terraform Enterprise environment to a GitLab repository, you need
-three pieces of information:
-
-- **GitLab repository** - The location of the repository being imported in the
-format _username/repository_.
-
-- **GitLab branch** - The branch from which to ingress new versions. This
-defaults to the value GitLab provides as the default branch for this repository.
-
-- **Path to directory of Terraform files** - The repository's subdirectory that
-contains its terraform files. This defaults to the root of the repository.
-
-### Connecting a GitLab Repository to a Terraform Environment
-
-Navigate to https://atlas.hashicorp.com/configurations/import and select Link to GitLab.com (or your preferred GitLab installation). A Menu will appear asking you to name the environment. Then use the autocomplete field for repository and select the repository for which you'd like to create a webhook & environment. If necessary, fill out information about the VCS branch to pull from as well as the directory where the Terraform files live within the repository. `Click Create and Continue`.
-
-Upon success, you will be redirected to the environment's runs page (https://atlas.hashicorp.com/terraform/your-organization/environments/your-environment/changes/runs). A message will display letting you know that the repository is ingressing from GitLab and once finished you will be able to Queue, Run, & Apply a Terraform Plan. Depending on your webhook settings, changes will be triggered through git events on the specified branch. The events currently supported are repository and branch push, merge request, and merge.
-
-### Connecting a GitLab Repository to a Packer Build Configuration
-
-Navigate to https://atlas.hashicorp.com/builds/new and select the organization for which you'd like to create a build configuration. Name your build & select `Connect build configuration to a Git Repository`. A form will appear asking you to select your Git Host. Select your preferred GitLab integration. Choose the repository for which you'd like to create a webhook. Fill out any other information in the form such as preferred branch to build from (your default branch will be selected should this field be left blank), Packer directory, and Packer Template.
-
-Upon clicking `Create` you will be redirected to the build configuration (https://atlas.hashicorp.com/packer/your-organization/build-configurations/your-build-configuration). On this page, you will have the opportunity to make any changes to your packer template, push changes via the CLI, or manually queue a Packer build. Depending on your webhook settings, changes will be triggered through git events on the specified branch. The events currently supported are repository and branch push, merge request, and merge.
diff --git a/website/source/docs/enterprise/vcs/index.html.md b/website/source/docs/enterprise/vcs/index.html.md
deleted file mode 100755
index c88012641..000000000
--- a/website/source/docs/enterprise/vcs/index.html.md
+++ /dev/null
@@ -1,17 +0,0 @@
----
-layout: "enterprise"
-page_title: "VCS Integrations - Terraform Enterprise"
-sidebar_current: "docs-enterprise-vcs"
-description: |-
- Terraform Enterprise can integrate with version control software Git and GitHub.
----
-
-# Integration with Version Control Software
-
-Terraform Enterprise can integrate with your version control software to
-automatically execute Terraform with your latest Terraform configuration as you
-commit changes to source control.
-
-Different capabilities within Terraform Enterprise are available depending on
-the integration in use. The available integration options are on the sidebar
-navigation.
diff --git a/website/source/docs/providers/alicloud/d/images.html.markdown b/website/source/docs/providers/alicloud/d/images.html.markdown
deleted file mode 100644
index cdd6478f1..000000000
--- a/website/source/docs/providers/alicloud/d/images.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_images"
-sidebar_current: "docs-alicloud-datasource-images"
-description: |-
- Provides a list of images available to the user.
----
-
-# alicloud_images
-
-The Images data source provides a list of image resources, including private images of the user, images of system resources provided by Alicloud, other public images, and those available on the image market.
-
-## Example Usage
-
-```hcl
-data "alicloud_images" "multi_image" {
- owners = "system"
- name_regex = "^centos_6"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name_regex` - (Optional) A regex string to apply to the image list returned by Alicloud.
-* `most_recent` - (Optional) If more than one result is returned, use the most recent image.
-* `owners` - (Optional) Limit search to specific image owners. Valid items are `system`, `self`, `others`, `marketplace`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the image.
-* `architecture` - Platform type of the image system: i386 | x86_64.
-* `creation_time` - Time of creation.
-* `description` - Description of the image.
-* `image_owner_alias` - Alias of the image owner.
-* `os_name` - Display name of the OS.
-* `status` - Status of the image, with possible values: `UnAvailable`, `Available`, `Creating` or `CreateFailed`.
-* `size` - Size of the image.
-* `disk_device_mappings` - Description of the system with disks and snapshots under an image.
- * `device` - Device information of the created disk: such as /dev/xvdb.
- * `size` - Size of the created disk.
- * `snapshot_id` - Snapshot ID.
-* `product_code` - Product code of the image on the image market.
-* `is_subscribed` - Whether the user has subscribed to the terms of service for the image product corresponding to the ProductCode.
-* `image_version` - Version of the image.
-* `progress` - Progress of image creation, presented in percentages.
diff --git a/website/source/docs/providers/alicloud/d/instance_types.html.markdown b/website/source/docs/providers/alicloud/d/instance_types.html.markdown
deleted file mode 100644
index e95ccf6cd..000000000
--- a/website/source/docs/providers/alicloud/d/instance_types.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_instance_types"
-sidebar_current: "docs-alicloud-datasource-instance-types"
-description: |-
- Provides a list of Ecs Instance Types for use in alicloud_instance resource.
----
-
-# alicloud_instance_types
-
-The Instance Types data source lists the ecs_instance_types of Alicloud.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "alicloud_instance_types" "1c2g" {
- cpu_core_count = 1
- memory_size = 2
-}
-
-# Create ecs instance with the first matched instance_type
-resource "alicloud_instance" "instance" {
- instance_type = "${data.alicloud_instance_types.1c2g.instance_types.0.id}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cpu_core_count` - (Optional) Limit search to specific cpu core count.
-* `memory_size` - (Optional) Limit search to specific memory size.
-* `instance_type_family` - (Optional) Allows to filter list of Instance Types based on their
-family name, for example 'ecs.n1'.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the instance type.
-* `cpu_core_count` - Number of CPU cores.
-* `memory_size` - Size of memory, measured in GB.
-* `family` - The instance type family.
diff --git a/website/source/docs/providers/alicloud/d/regions.html.markdown b/website/source/docs/providers/alicloud/d/regions.html.markdown
deleted file mode 100644
index 2ab9f22db..000000000
--- a/website/source/docs/providers/alicloud/d/regions.html.markdown
+++ /dev/null
@@ -1,33 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_regions"
-sidebar_current: "docs-alicloud-datasource-regions"
-description: |-
- Provides a list of Availability Regions which can be used by an Alicloud account.
----
-
-# alicloud_regions
-
-The Regions data source allows access to the list of Alicloud Regions.
-
-## Example Usage
-
-```hcl
-data "alicloud_regions" "current" {
- current = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The full name of the region to select.
-* `current` - (Optional) Set to true to match only the region configured in the provider.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the region.
-* `local_name` - Name of the region in the local language.
diff --git a/website/source/docs/providers/alicloud/d/zones.html.markdown b/website/source/docs/providers/alicloud/d/zones.html.markdown
deleted file mode 100644
index 2b208bb0d..000000000
--- a/website/source/docs/providers/alicloud/d/zones.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_zones"
-sidebar_current: "docs-alicloud-datasource-zones"
-description: |-
- Provides a list of Availability Zones which can be used by an Alicloud account.
----
-
-# alicloud_zones
-
-The Zones data source allows access to the list of Alicloud Zones which can be accessed by an Alicloud account within the region configured in the provider.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "alicloud_zones" "default" {
- "available_instance_type" = "ecs.s2.large"
- "available_disk_category" = "cloud_ssd"
-}
-
-# Create ecs instance with the first matched zone
-
-resource "alicloud_instance" "instance" {
- availability_zone = "${data.alicloud_zones.default.zones.0.id}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `available_instance_type` - (Optional) Limit search to specific instance type.
-* `available_resource_creation` - (Optional) Limit search to specific resource type. The following values are allowed `Instance`, `Disk` and `VSwitch`.
-* `available_disk_category` - (Optional) Limit search to specific disk category. Can be either `cloud`, `ephemeral`, or `ephemeral_ssd`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the zone.
-* `local_name` - Name of the zone in the local language.
-* `available_instance_types` - Instance types allowed.
-* `available_resource_creation` - Type of resource that can be created.
-* `available_disk_categories` - Set of supported disk categories.
diff --git a/website/source/docs/providers/alicloud/index.html.markdown b/website/source/docs/providers/alicloud/index.html.markdown
deleted file mode 100644
index 3da317e76..000000000
--- a/website/source/docs/providers/alicloud/index.html.markdown
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: "alicloud"
-page_title: "Provider: alicloud"
-sidebar_current: "docs-alicloud-index"
-description: |-
- The Alicloud provider is used to interact with many resources supported by Alicloud. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Alicloud Provider
-
-The Alicloud provider is used to interact with the
-many resources supported by [Alicloud](https://www.aliyun.com). The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Alicloud Provider
-provider "alicloud" {
- access_key = "${var.access_key}"
- secret_key = "${var.secret_key}"
- region = "${var.region}"
-}
-
-# Create a web server
-resource "alicloud_instance" "web" {
- # cn-beijing
- provider = "alicloud"
- availability_zone = "cn-beijing-b"
- image_id = "ubuntu_140405_32_40G_cloudinit_20161115.vhd"
-
- instance_network_type = "Classic"
- internet_charge_type = "PayByBandwidth"
-
- instance_type = "ecs.n1.medium"
- io_optimized = "optimized"
- system_disk_category = "cloud_efficiency"
- security_groups = ["${alicloud_security_group.default.id}"]
- instance_name = "web"
-}
-
-# Create security group
-resource "alicloud_security_group" "default" {
- name = "default"
- provider = "alicloud"
- description = "default"
-}
-```
-
-## Authentication
-
-The Alicloud provider offers a flexible means of providing credentials for authentication.
-The following methods are supported, in this order, and explained below:
-
-- Static credentials
-- Environment variables
-
-### Static credentials ###
-
-Static credentials can be provided by adding an `access_key`, `secret_key` and `region` in-line in the
-alicloud provider block:
-
-Usage:
-
-```hcl
-provider "alicloud" {
- access_key = "${var.access_key}"
- secret_key = "${var.secret_key}"
- region = "${var.region}"
-}
-```
-
-
-### Environment variables
-
-You can provide your credentials via `ALICLOUD_ACCESS_KEY` and `ALICLOUD_SECRET_KEY`,
-environment variables, representing your Alicloud Access Key and Secret Key, respectively.
-`ALICLOUD_REGION` is also used, if applicable:
-
-```hcl
-provider "alicloud" {}
-```
-
-Usage:
-
-```shell
-$ export ALICLOUD_ACCESS_KEY="anaccesskey"
-$ export ALICLOUD_SECRET_KEY="asecretkey"
-$ export ALICLOUD_REGION="cn-beijing"
-$ terraform plan
-```
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `access_key` - (Optional) This is the Alicloud access key. It must be provided, but
- it can also be sourced from the `ALICLOUD_ACCESS_KEY` environment variable.
-
-* `secret_key` - (Optional) This is the Alicloud secret key. It must be provided, but
- it can also be sourced from the `ALICLOUD_SECRET_KEY` environment variable.
-
-* `region` - (Required) This is the Alicloud region. It must be provided, but
- it can also be sourced from the `ALICLOUD_REGION` environment variable.
-
-
-## Testing
-
-Credentials must be provided via the `ALICLOUD_ACCESS_KEY`, and `ALICLOUD_SECRET_KEY` environment variables in order to run acceptance tests.
diff --git a/website/source/docs/providers/alicloud/r/db_instance.html.markdown b/website/source/docs/providers/alicloud/r/db_instance.html.markdown
deleted file mode 100644
index 557690327..000000000
--- a/website/source/docs/providers/alicloud/r/db_instance.html.markdown
+++ /dev/null
@@ -1,105 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_db_instance"
-sidebar_current: "docs-alicloud-resource-db-instance"
-description: |-
- Provides an RDS instance resource.
----
-
-# alicloud\_db\_instance
-
-Provides an RDS instance resource. A DB instance is an isolated database
-environment in the cloud. A DB instance can contain multiple user-created
-databases.
-
-## Example Usage
-
-```
-resource "alicloud_db_instance" "default" {
- engine = "MySQL"
- engine_version = "5.6"
- db_instance_class = "rds.mysql.t1.small"
- db_instance_storage = "10"
- db_instance_net_type = "Intranet"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `engine` - (Required) Database type. Value options: MySQL, SQLServer, PostgreSQL, and PPAS.
-* `engine_version` - (Required) Database version. Value options:
- - 5.5/5.6/5.7 for MySQL
- - 2008r2/2012 for SQLServer
- - 9.4 for PostgreSQL
- - 9.3 for PPAS
-* `db_instance_class` - (Required) Instance type. For details, see [Instance type table](https://intl.aliyun.com/help/doc-detail/26312.htm?spm=a3c0i.o26228en.a3.2.bRUHF3).
-* `db_instance_storage` - (Required) User-defined storage space. Value range:
- - [5, 2000] for MySQL/PostgreSQL/PPAS HA dual node edition;
- - [20,1000] for MySQL 5.7 basic single node edition;
- - [10, 2000] for SQL Server 2008R2;
- - [20,2000] for SQL Server 2012 basic single node edition
- Increase progressively at a rate of 5 GB. The unit is GB. For details, see [Instance type table](https://intl.aliyun.com/help/doc-detail/26312.htm?spm=a3c0i.o26228en.a3.3.bRUHF3).
-* `instance_charge_type` - (Optional) Valid values are `Prepaid`, `Postpaid`, The default is `Postpaid`.
-* `period` - (Optional) The time that you have bought the resource, in month. Only valid when instance_charge_type is set as `PrePaid`. Value range [1, 12].
-* `zone_id` - (Optional) Selected zone to create database instance. You cannot set the ZoneId parameter if the MultiAZ parameter is set to true.
-* `multi_az` - (Optional) Specifies if the database instance is a multiple Availability Zone deployment.
-* `db_instance_net_type` - (Optional) Network connection type of an instance. Internet: public network; Intranet: private network
-* `allocate_public_connection` - (Optional) If set to true, applies for an Internet connection string for the instance.
-* `instance_network_type` - (Optional) VPC: VPC instance; Classic: classic instance. If no value is specified, a classic instance will be created by default.
-* `vswitch_id` - (Optional) The virtual switch ID to launch in VPC. If you want to create instances in VPC network, this parameter must be set.
-* `master_user_name` - (Optional) The master user name for the database instance. Operation account requiring a uniqueness check. It may consist of lower case letters, numbers and underlines, and must start with a letter and have no more than 16 characters.
-* `master_user_password` - (Optional) The master password for the database instance. Operation password. It may consist of letters, digits, or underlines, with a length of 6 to 32 characters.
-* `preferred_backup_period` - (Optional) Backup period. Values: Monday, Tuesday, Wednesday, Thursday, Friday, Saturday, and Sunday.
-* `preferred_backup_time` - (Optional) Backup time, in the format of HH:mmZ - HH:mmZ.
-* `backup_retention_period` - (Optional) Retention days of the backup (7 to 730 days). The default value is 7 days.
-* `security_ips` - (Optional) List of IP addresses under the IP address white list array. The list contains up to 1,000 IP addresses, separated by commas. Supported formats include 0.0.0.0/0, 10.23.12.24 (IP), and 10.23.12.24/24 (Classless Inter-Domain Routing (CIDR) mode. /24 represents the length of the prefix in an IP address. The range of the prefix length is [1,32]).
-* `db_mappings` - (Optional) Database mappings to attach to db instance. See [Block database](#block-database) below for details.
-
-
-## Block database
-
-The database mapping supports the following:
-
-* `db_name` - (Required) Name of the database requiring a uniqueness check. It may consist of lower case letters, numbers and underlines, and must start with a letter and have no more than 64 characters.
-* `character_set_name` - (Required) Character set. The value range is limited to the following:
- - MySQL type:
- + utf8
- + gbk
- + latin1
- + utf8mb4 (included in versions 5.5 and 5.6).
- - SQLServer type:
- + Chinese_PRC_CI_AS
- + Chinese_PRC_CS_AS
- + SQL_Latin1_General_CP1_CI_AS
- + SQL_Latin1_General_CP1_CS_AS
- + Chinese_PRC_BIN
-* `db_description` - (Optional) Database description, which cannot exceed 256 characters. NOTE: It cannot begin with https://.
-
-
-~> **NOTE:** Modifying a database attribute and inserting/removing items are not supported at the same time.
-We recommend splitting these into two separate operations.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The RDS instance ID.
-* `instance_charge_type` - The instance charge type.
-* `period` - The time that you have bought the resource.
-* `engine` - Database type.
-* `engine_version` - The database engine version.
-* `db_instance_class` - The RDS instance class.
-* `db_instance_storage` - The amount of allocated storage.
-* `port` - The database port.
-* `zone_id` - The zone ID of the DB instance.
-* `db_instance_net_type` - Network connection type of an instance, `Internet` or `Intranet`.
-* `instance_network_type` - The instance network type and it has two values: `vpc` and `classic`.
-* `db_mappings` - Database mappings attached to db instance.
-* `preferred_backup_period` - Backup period.
-* `preferred_backup_time` - Backup time.
-* `backup_retention_period` - Retention days of the backup.
-* `security_ips` - Security ips of instance whitelist.
-* `connections` - Views all the connection information of a specified instance.
-
diff --git a/website/source/docs/providers/alicloud/r/disk.html.markdown b/website/source/docs/providers/alicloud/r/disk.html.markdown
deleted file mode 100644
index 82814b732..000000000
--- a/website/source/docs/providers/alicloud/r/disk.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_disk"
-sidebar_current: "docs-alicloud-resource-disk"
-description: |-
- Provides a ECS Disk resource.
----
-
-# alicloud\_disk
-
-Provides a ECS disk resource.
-
-~> **NOTE:** One of `size` or `snapshot_id` is required when specifying an ECS disk. If both are specified, `size` must be greater than the size of the snapshot which `snapshot_id` represents. Currently, `alicloud_disk` doesn't resize disks.
-
-## Example Usage
-
-```
-# Create a new ECS disk.
-resource "alicloud_disk" "ecs_disk" {
- # cn-beijing
- availability_zone = "cn-beijing-b"
- name = "New-disk"
- description = "Hello ecs disk."
- category = "cloud_efficiency"
- size = "30"
-
- tags {
- Name = "TerraformTest"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `availability_zone` - (Required, Forces new resource) The Zone to create the disk in.
-* `name` - (Optional) Name of the ECS disk. This name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Default value is null.
-* `description` - (Optional) Description of the disk. This description can have a string of 2 to 256 characters, It cannot begin with http:// or https://. Default value is null.
-* `category` - (Optional, Forces new resource) Category of the disk. Valid values are `cloud`, `cloud_efficiency` and `cloud_ssd`. Default is `cloud`.
-* `size` - (Required) The size of the disk in GiBs, and its value depends on `Category`. `cloud` disk value range: 5GB ~ 2000GB and other category disk value range: 20 ~ 32768.
-* `snapshot_id` - (Optional) A snapshot to base the disk off of. If it is specified, `size` will be invalid and the disk size is equal to the snapshot size.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The disk ID.
-* `availability_zone` - The Zone to create the disk in.
-* `name` - The disk name.
-* `description` - The disk description.
-* `status` - The disk status.
-* `category` - The disk category.
-* `size` - The disk size.
-* `snapshot_id` - The disk snapshot ID.
-* `tags` - The disk tags.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown b/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown
deleted file mode 100644
index d39e85a4c..000000000
--- a/website/source/docs/providers/alicloud/r/disk_attachment.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_disk_attachment"
-sidebar_current: "docs-alicloud-resource-disk-attachment"
-description: |-
- Provides a ECS Disk Attachment resource.
----
-
-# alicloud\_disk\_attachment
-
-Provides an Alicloud ECS Disk Attachment as a resource, to attach and detach disks from ECS Instances.
-
-## Example Usage
-
-Basic usage
-
-```
-# Create a new ECS disk-attachment and use it attach one disk to a new instance.
-
-resource "alicloud_security_group" "ecs_sg" {
- name = "terraform-test-group"
- description = "New security group"
-}
-
-resource "alicloud_disk" "ecs_disk" {
- availability_zone = "cn-beijing-a"
- size = "50"
-
- tags {
- Name = "TerraformTest-disk"
- }
-}
-
-resource "alicloud_instance" "ecs_instance" {
- image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
- instance_type = "ecs.s1.small"
- availability_zone = "cn-beijing-a"
- security_groups = ["${alicloud_security_group.ecs_sg.id}"]
- instance_name = "Hello"
- instance_network_type = "classic"
- internet_charge_type = "PayByBandwidth"
-
- tags {
- Name = "TerraformTest-instance"
- }
-}
-
-resource "alicloud_disk_attachment" "ecs_disk_att" {
- disk_id = "${alicloud_disk.ecs_disk.id}"
- instance_id = "${alicloud_instance.ecs_instance.id}"
- device_name = "/dev/xvdb"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `instance_id` - (Required, Forces new resource) ID of the Instance to attach to.
-* `disk_id` - (Required, Forces new resource) ID of the Disk to be attached.
-* `device_name` - (Required, Forces new resource) The device name to expose to the instance (for example, /dev/xvdb).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `instance_id` - ID of the Instance.
-* `disk_id` - ID of the Disk.
-* `device_name` - The device name exposed to the instance.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/eip.html.markdown b/website/source/docs/providers/alicloud/r/eip.html.markdown
deleted file mode 100644
index ddb1b7689..000000000
--- a/website/source/docs/providers/alicloud/r/eip.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_eip"
-sidebar_current: "docs-alicloud-resource-eip"
-description: |-
- Provides a ECS EIP resource.
----
-
-# alicloud\_eip
-
-Provides a ECS EIP resource.
-
-## Example Usage
-
-```
-# Create a new EIP.
-resource "alicloud_eip" "example" {
- bandwidth = "10"
- internet_charge_type = "PayByBandwidth"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `bandwidth` - (Optional) Maximum bandwidth to the elastic public network, measured in Mbps (Mega bit per second). If this value is not specified, then automatically sets it to 5 Mbps.
-* `internet_charge_type` - (Optional, Forces new resource) Internet charge type of the EIP, Valid values are `PayByBandwidth`, `PayByTraffic`. Default is `PayByBandwidth`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EIP ID.
-* `bandwidth` - The elastic public network bandwidth.
-* `internet_charge_type` - The EIP internet charge type.
-* `status` - The EIP current status.
-* `ip_address` - The elastic ip address
-* `instance` - The ID of the instance which is associated with the EIP.
diff --git a/website/source/docs/providers/alicloud/r/eip_association.html.markdown b/website/source/docs/providers/alicloud/r/eip_association.html.markdown
deleted file mode 100644
index 6c0d7059a..000000000
--- a/website/source/docs/providers/alicloud/r/eip_association.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_eip_association"
-sidebar_current: "docs-alicloud-resource-eip-association"
-description: |-
- Provides a ECS EIP Association resource.
----
-
-# alicloud\_eip\_association
-
-Provides an Alicloud EIP Association resource, to associate and disassociate Elastic IPs from ECS Instances.
-
-~> **NOTE:** `alicloud_eip_association` is useful in scenarios where EIPs are either
- pre-existing or distributed to customers or users and therefore cannot be changed.
- In addition, it only supports ECS-VPC.
-
-## Example Usage
-
-```
-# Create a new EIP association and use it to associate an EIP with an instance.
-
-resource "alicloud_vpc" "vpc" {
- cidr_block = "10.1.0.0/21"
-}
-
-resource "alicloud_vswitch" "vsw" {
- vpc_id = "${alicloud_vpc.vpc.id}"
- cidr_block = "10.1.1.0/24"
- availability_zone = "cn-beijing-a"
-
- depends_on = [
- "alicloud_vpc.vpc",
- ]
-}
-
-resource "alicloud_instance" "ecs_instance" {
- image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
- instance_type = "ecs.s1.small"
- availability_zone = "cn-beijing-a"
- security_groups = ["${alicloud_security_group.group.id}"]
- vswitch_id = "${alicloud_vswitch.vsw.id}"
- instance_name = "hello"
- instance_network_type = "vpc"
-
- tags {
- Name = "TerraformTest-instance"
- }
-}
-
-resource "alicloud_eip" "eip" {}
-
-resource "alicloud_eip_association" "eip_asso" {
- allocation_id = "${alicloud_eip.eip.id}"
- instance_id = "${alicloud_instance.ecs_instance.id}"
-}
-
-resource "alicloud_security_group" "group" {
- name = "terraform-test-group"
- description = "New security group"
- vpc_id = "${alicloud_vpc.vpc.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allocation_id` - (Optional, Forces new resource) The allocation EIP ID.
-* `instance_id` - (Optional, Forces new resource) The ID of the instance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `allocation_id` - As above.
-* `instance_id` - As above.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown
deleted file mode 100644
index 003b7c988..000000000
--- a/website/source/docs/providers/alicloud/r/ess_scaling_configuration.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_ess_scaling_configuration"
-sidebar_current: "docs-alicloud-resource-ess-scaling-configuration"
-description: |-
- Provides a ESS scaling configuration resource.
----
-
-# alicloud\_ess\_scaling\_configuration
-
-Provides a ESS scaling configuration resource.
-
-## Example Usage
-
-```
-resource "alicloud_security_group" "classic" {
- # Other parameters...
-}
-resource "alicloud_ess_scaling_group" "scaling" {
- min_size = 1
- max_size = 2
- removal_policies = ["OldestInstance", "NewestInstance"]
-}
-
-resource "alicloud_ess_scaling_configuration" "config" {
- scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}"
-
- image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
- instance_type = "ecs.s2.large"
- security_group_id = "${alicloud_security_group.classic.id}"
-}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `scaling_group_id` - (Required) ID of the scaling group of a scaling configuration.
-* `image_id` - (Required) ID of an image file, indicating the image resource selected when an instance is enabled.
-* `instance_type` - (Required) Resource type of an ECS instance.
-* `io_optimized` - (Required) Valid values are `none`, `optimized`, If `optimized`, the launched ECS instance will be I/O optimized.
-* `security_group_id` - (Required) ID of the security group to which a newly created instance belongs.
-* `scaling_configuration_name` - (Optional) Name shown for the scaling configuration. If this parameter value is not specified, the default value is ScalingConfigurationId.
-* `internet_charge_type` - (Optional) Network billing type, Values: PayByBandwidth or PayByTraffic. If this parameter value is not specified, the default value is PayByBandwidth.
-* `internet_max_bandwidth_in` - (Optional) Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). The value range is [1,200].
-* `internet_max_bandwidth_out` - (Optional) Maximum outgoing bandwidth from the public network, measured in Mbps (Mega bit per second). The value range for PayByBandwidth is [1,100].
-* `system_disk_category` - (Optional) Category of the system disk. The parameter value options are cloud and ephemeral.
-* `data_disk` - (Optional) DataDisk mappings to attach to ecs instance. See [Block datadisk](#block-datadisk) below for details.
-* `instance_ids` - (Optional) ID of the ECS instance to be attached to the scaling group after it is enabled. You can input up to 20 IDs.
-
-
-## Block datadisk
-
-The datadisk mapping supports the following:
-
-* `size` - (Optional) Size of data disk, in GB. The value ranges from 5 to 2,000 for a cloud disk and from 5 to 1,024 for an ephemeral disk. A maximum of four values can be entered.
-* `category` - (Optional) Category of data disk. The parameter value options are cloud and ephemeral.
-* `snapshot_id` - (Optional) Snapshot used for creating the data disk. If this parameter is specified, the size parameter is neglected, and the size of the created disk is the size of the snapshot.
-* `device` - (Optional) Attaching point of the data disk. If this parameter is empty, the ECS automatically assigns the attaching point when an ECS is created. The parameter value ranges from /dev/xvdb to /dev/xvdz. Restrictions on attaching ECS instances:
- - The attached ECS instance and the scaling group must be in the same region.
- - The attached ECS instance and the instance with active scaling configurations must be of the same type.
- - The attached ECS instance must be in the running state.
- - The attached ECS instance has not been attached to other scaling groups.
- - The attached ECS instance supports Subscription and Pay-As-You-Go payment methods.
- - If the VswitchID is specified for a scaling group, you cannot attach Classic ECS instances or ECS instances on other VPCs to the scaling group.
- - If the VswitchID is not specified for the scaling group, ECS instances of the VPC type cannot be attached to the scaling group
-* `active` - (Optional) If active current scaling configuration in the scaling group.
-* `enable` - (Optional) Enables the specified scaling group.
- - After the scaling group is successfully enabled (the group is active), the ECS instances specified by the interface are attached to the group.
- - If the current number of ECS instances in the scaling group is still smaller than MinSize after the ECS instances specified by the interface are attached, the Auto Scaling service automatically creates ECS instances in Pay-As-You-Go mode to make odds even. For example, a scaling group is created with MinSize = 5. Two existing ECS instances are specified by the InstanceId.N parameter when the scaling group is enabled. Three additional ECS instances are automatically created after the two ECS instances are attached by the Auto Scaling service to the scaling group.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The scaling configuration ID.
-* `active` - Whether the current scaling configuration is active.
-* `image_id` - The ecs instance Image id.
-* `instance_type` - The ecs instance type.
-* `io_optimized` - The ecs instance whether I/O optimized.
-* `security_group_id` - ID of the security group to which a newly created instance belongs.
-* `scaling_configuration_name` - Name of scaling configuration.
-* `internet_charge_type` - Internet charge type of ecs instance.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown
deleted file mode 100644
index f039c5f19..000000000
--- a/website/source/docs/providers/alicloud/r/ess_scaling_group.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_ess_scaling_group"
-sidebar_current: "docs-alicloud-resource-ess-scaling-group"
-description: |-
- Provides a ESS scaling group resource.
----
-
-# alicloud\_ess\_scaling\_group
-
-Provides a ESS scaling group resource.
-
-## Example Usage
-
-```
-resource "alicloud_ess_scaling_group" "scaling" {
- min_size = 1
- max_size = 2
- removal_policies = ["OldestInstance", "NewestInstance"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `min_size` - (Required) Minimum number of ECS instances in the scaling group. Value range: [0, 100].
-* `max_size` - (Required) Maximum number of ECS instances in the scaling group. Value range: [0, 100].
-* `scaling_group_name` - (Optional) Name shown for the scaling group, which must contain 2-40 characters (English or Chinese). If this parameter is not specified, the default value is ScalingGroupId.
-* `default_cooldown` - (Optional) Default cool-down time (in seconds) of the scaling group. Value range: [0, 86400]. The default value is 300s.
-* `vswitch_id` - (Optional) The virtual switch ID which the ecs instance to be create in.
-* `removal_policies` - (Optional) RemovalPolicy is used to select the ECS instances you want to remove from the scaling group when multiple candidates for removal exist. Optional values:
- - OldestInstance: removes the first ECS instance attached to the scaling group.
- - NewestInstance: removes the most recently attached ECS instance in the scaling group.
- - OldestScalingConfiguration: removes the ECS instance with the oldest scaling configuration.
- - Default values: OldestScalingConfiguration and OldestInstance. You can enter up to two removal policies.
-* `db_instance_ids` - (Optional) If an RDS instance is specified in the scaling group, the scaling group automatically attaches the Intranet IP addresses of its ECS instances to the RDS access whitelist.
- - The specified RDS instance must be in running status.
- - The specified RDS instance’s whitelist must have room for more IP addresses.
-* `loadbalancer_ids` - (Optional) If a Server Load Balancer instance is specified in the scaling group, the scaling group automatically attaches its ECS instances to the Server Load Balancer instance.
- - The Server Load Balancer instance must be enabled.
- - Health check must be enabled for all listener ports configured for the Server Load Balancer instance; otherwise, creation fails.
- - The Server Load Balancer instance attached with VPC-type ECS instances cannot be attached to the scaling group.
- - The default weight of an ECS instance attached to the Server Load Balancer instance is 50.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The scaling group ID.
-* `min_size` - The minimum number of ECS instances.
-* `max_size` - The maximum number of ECS instances.
-* `scaling_group_name` - The name of the scaling group.
-* `default_cooldown` - The default cool-down of the scaling group.
-* `removal_policies` - The removal policy used to select the ECS instance to remove from the scaling group.
-* `db_instance_ids` - The db instance id which the ECS instance attached to.
-* `loadbalancer_ids` - The slb instance id which the ECS instance attached to.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown b/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown
deleted file mode 100644
index ec24b5067..000000000
--- a/website/source/docs/providers/alicloud/r/ess_scaling_rule.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_ess_scaling_rule"
-sidebar_current: "docs-alicloud-resource-ess-scaling-rule"
-description: |-
- Provides a ESS scaling rule resource.
----
-
-# alicloud\_ess\_scaling\_rule
-
-Provides a ESS scaling rule resource.
-
-## Example Usage
-
-```
-resource "alicloud_ess_scaling_group" "scaling" {
- # Other parameters...
-}
-
-resource "alicloud_ess_scaling_configuration" "config" {
- # Other parameters...
-}
-
-resource "alicloud_ess_scaling_rule" "rule" {
- scaling_group_id = "${alicloud_ess_scaling_group.scaling.id}"
- adjustment_type = "TotalCapacity"
- adjustment_value = 2
- cooldown = 60
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `scaling_group_id` - (Required) ID of the scaling group of a scaling rule.
-* `adjustment_type` - (Required) Adjustment mode of a scaling rule. Optional values:
- - QuantityChangeInCapacity: It is used to increase or decrease a specified number of ECS instances.
- - PercentChangeInCapacity: It is used to increase or decrease a specified proportion of ECS instances.
- - TotalCapacity: It is used to adjust the quantity of ECS instances in the current scaling group to a specified value.
-* `adjustment_value` - (Required) Adjusted value of a scaling rule. Value range:
- - QuantityChangeInCapacity:(0, 100] U (-100, 0]
- - PercentChangeInCapacity:[0, 10000] U [-10000, 0]
- - TotalCapacity:[0, 100]
-* `scaling_rule_name` - (Optional) Name shown for the scaling rule, which is a string containing 2 to 40 English or Chinese characters.
-* `cooldown` - (Optional) Cool-down time of a scaling rule. Value range: [0, 86,400], in seconds. The default value is empty.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The scaling rule ID.
-* `scaling_group_id` - The id of scaling group.
-* `ari` - Unique identifier of a scaling rule.
-* `adjustment_type` - Adjustment mode of a scaling rule.
-* `adjustment_value` - Adjustment value of a scaling rule.
-* `scaling_rule_name` - Name of a scaling rule.
-* `cooldown` - Cool-down time of a scaling rule.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown b/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown
deleted file mode 100644
index abe2a298e..000000000
--- a/website/source/docs/providers/alicloud/r/ess_schedule.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_ess_schedule"
-sidebar_current: "docs-alicloud-resource-ess-schedule"
-description: |-
- Provides a ESS schedule resource.
----
-
-# alicloud\_ess\_schedule
-
-Provides a ESS schedule resource.
-
-## Example Usage
-
-```
-resource "alicloud_ess_scaling_group" "scaling" {
- # Other parameters...
-}
-
-resource "alicloud_ess_scaling_configuration" "config" {
- # Other parameters...
-}
-
-resource "alicloud_ess_scaling_rule" "rule" {
- # Other parameters...
-}
-
-resource "alicloud_ess_schedule" "schedule" {
- scheduled_action = "${alicloud_ess_scaling_rule.rule.ari}"
- launch_time = "2017-04-29T07:30Z"
- scheduled_task_name = "sg-schedule"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `scheduled_action` - (Required) Operations performed when the scheduled task is triggered. Fill in the unique identifier of the scaling rule.
-* `launch_time` - (Required) Operations performed when the scheduled task is triggered. Fill in the unique identifier of the scaling rule.
-* `scheduled_task_name` - (Optional) Display name of the scheduled task, which must be 2-40 characters (English or Chinese) long.
-* `description` - (Optional) Description of the scheduled task, which is 2-200 characters (English or Chinese) long.
-* `launch_expiration_time` - (Optional) Time period within which the failed scheduled task is retried. The default value is 600s. Value range: [0, 21600]
-* `recurrence_type` - (Optional) Type of the scheduled task to be repeated. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified. Optional values:
- - Daily: Recurrence interval by day for a scheduled task.
- - Weekly: Recurrence interval by week for a scheduled task.
- - Monthly: Recurrence interval by month for a scheduled task.
-* `recurrence_value` - (Optional) Value of the scheduled task to be repeated. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified.
- - Daily: Only one value in the range [1,31] can be filled.
- - Weekly: Multiple values can be filled. The values of Sunday to Saturday are 0 to 6 in sequence. Multiple values shall be separated by a comma “,”.
- - Monthly: In the format of A-B. The value range of A and B is 1 to 31, and the B value must be greater than the A value.
-* `recurrence_end_time` - (Optional) End time of the scheduled task to be repeated. The date format follows the ISO8601 standard and uses UTC time. It is in the format of YYYY-MM-DDThh:mmZ. A time point 90 days after creation or modification cannot be entered. RecurrenceType, RecurrenceValue and RecurrenceEndTime must be specified.
-* `task_enabled` - (Optional) Whether to enable the scheduled task. The default value is true.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The schedule task ID.
-* `scheduled_action` - The action of schedule task.
-* `launch_time` - The time of schedule task be triggered.
-* `scheduled_task_name` - The name of schedule task.
-* `description` - The description of schedule task.
-* `task_enabled` - Wether the task is enabled.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/forward.html.markdown b/website/source/docs/providers/alicloud/r/forward.html.markdown
deleted file mode 100644
index 6024921a6..000000000
--- a/website/source/docs/providers/alicloud/r/forward.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_forward_entry"
-sidebar_current: "docs-alicloud-resource-vpc"
-description: |-
- Provides a Alicloud forward resource.
----
-
-# alicloud\_forward
-
-Provides a forward resource.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_vpc" "foo" {
- ...
-}
-
-resource "alicloud_vswitch" "foo" {
- ...
-}
-
-resource "alicloud_nat_gateway" "foo" {
- vpc_id = "${alicloud_vpc.foo.id}"
- spec = "Small"
- name = "test_foo"
-
- bandwidth_packages = [
- {
- ip_count = 2
- bandwidth = 5
- zone = ""
- },
- {
- ip_count = 1
- bandwidth = 6
- zone = "cn-beijing-b"
- }
- ]
-
- depends_on = [
- "alicloud_vswitch.foo",
- ]
-}
-
-resource "alicloud_forward_entry" "foo" {
- forward_table_id = "${alicloud_nat_gateway.foo.forward_table_ids}"
- external_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
- external_port = "80"
- ip_protocol = "tcp"
- internal_ip = "172.16.0.3"
- internal_port = "8080"
-}
-
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `forward_table_id` - (Required, Forces new resource) The value can get from `alicloud_nat_gateway` Attributes "forward_table_ids".
-* `external_ip` - (Required, Forces new resource) The external ip address, the ip must along bandwidth package public ip which `alicloud_nat_gateway` argument `bandwidth_packages`.
-* `external_port` - (Required) The external port, valid value is 1~65535|any.
-* `ip_protocol` - (Required) The ip protocal, valid value is tcp|udp|any.
-* `internal_ip` - (Required) The internal ip, must a private ip.
-* `internal_port` - (Required) The internal port, valid value is 1~65535|any.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/instance.html.markdown b/website/source/docs/providers/alicloud/r/instance.html.markdown
deleted file mode 100644
index a6fe7587c..000000000
--- a/website/source/docs/providers/alicloud/r/instance.html.markdown
+++ /dev/null
@@ -1,97 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_instance"
-sidebar_current: "docs-alicloud-resource-instance"
-description: |-
- Provides a ECS instance resource.
----
-
-# alicloud\_instance
-
-Provides a ECS instance resource.
-
-## Example Usage
-
-```
-# Create a new ECS instance for classic
-resource "alicloud_security_group" "classic" {
- name = "tf_test_foo"
- description = "foo"
-}
-
-resource "alicloud_instance" "classic" {
- # cn-beijing
- availability_zone = "cn-beijing-b"
- security_groups = ["${alicloud_security_group.classic.*.id}"]
-
- allocate_public_ip = true
-
- # series II
- instance_type = "ecs.n1.medium"
- io_optimized = "optimized"
- system_disk_category = "cloud_efficiency"
- image_id = "ubuntu_140405_64_40G_cloudinit_20161115.vhd"
- instance_name = "test_foo"
-}
-
-# Create a new ECS instance for VPC
-resource "alicloud_vpc" "default" {
- # Other parameters...
-}
-
-resource "alicloud_vswitch" "default" {
- # Other parameters...
-}
-
-resource "alicloud_slb" "vpc" {
- name = "test-slb-tf"
- vpc_id = "${alicloud_vpc.default.id}"
- vswitch_id = "${alicloud_vswitch.default.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `image_id` - (Required) The Image to use for the instance. ECS instance's image can be replaced via changing 'image_id'.
-* `instance_type` - (Required) The type of instance to start.
-* `io_optimized` - (Required) Valid values are `none`, `optimized`, If `optimized`, the launched ECS instance will be I/O optimized.
-* `security_groups` - (Required) A list of security group ids to associate with.
-* `availability_zone` - (Optional) The Zone to start the instance in.
-* `instance_name` - (Optional) The name of the ECS. This instance_name can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. If not specified,
-Terraform will autogenerate a default name is `ECS-Instance`.
-* `allocate_public_ip` - (Optional) Associate a public ip address with an instance in a VPC or Classic. Boolean value, Default is false.
-* `system_disk_category` - (Optional) Valid values are `cloud`, `cloud_efficiency`, `cloud_ssd`, For I/O optimized instance type, `cloud_ssd` and `cloud_efficiency` disks are supported. For non I/O Optimized instance type, `cloud` disk are supported.
-* `system_disk_size` - (Optional) Size of the system disk, value range: 40GB ~ 500GB. Default is 40GB. ECS instance's system disk can be reset when replacing system disk.
-* `description` - (Optional) Description of the instance, This description can have a string of 2 to 256 characters, It cannot begin with http:// or https://. Default value is null.
-* `internet_charge_type` - (Optional) Internet charge type of the instance, Valid values are `PayByBandwidth`, `PayByTraffic`. Default is `PayByBandwidth`.
-* `internet_max_bandwidth_in` - (Optional) Maximum incoming bandwidth from the public network, measured in Mbps (Mega bit per second). Value range: [1, 200]. If this value is not specified, then automatically sets it to 200 Mbps.
-* `internet_max_bandwidth_out` - (Optional) Maximum outgoing bandwidth to the public network, measured in Mbps (Mega bit per second). Value range: [0, 100], If this value is not specified, then automatically sets it to 0 Mbps.
-* `host_name` - (Optional) Host name of the ECS, which is a string of at least two characters. “hostname” cannot start or end with “.” or “-“. In addition, two or more consecutive “.” or “-“ symbols are not allowed. On Windows, the host name can contain a maximum of 15 characters, which can be a combination of uppercase/lowercase letters, numerals, and “-“. The host name cannot contain dots (“.”) or contain only numeric characters.
-On other OSs such as Linux, the host name can contain a maximum of 30 characters, which can be segments separated by dots (“.”), where each segment can contain uppercase/lowercase letters, numerals, or “_“.
-* `password` - (Optional) Password to an instance is a string of 8 to 30 characters. It must contain uppercase/lowercase letters and numerals, but cannot contain special symbols. In order to take effect new password, the instance will be restarted after modifying the password.
-* `vswitch_id` - (Optional) The virtual switch ID to launch in VPC. If you want to create instances in VPC network, this parameter must be set.
-* `instance_charge_type` - (Optional) Valid values are `PrePaid`, `PostPaid`, The default is `PostPaid`.
-* `period` - (Optional) The time that you have bought the resource, in month. Only valid when instance_charge_type is set as `PrePaid`. Value range [1, 12].
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `user_data` - (Optional) The user data to provide when launching the instance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The instance ID.
-* `availability_zone` - The Zone to start the instance in.
-* `instance_name` - The instance name.
-* `host_name` - The instance host name.
-* `description` - The instance description.
-* `status` - The instance status.
-* `image_id` - The instance Image Id.
-* `instance_type` - The instance type.
-* `instance_network_type` - The instance network type and it has two values: `vpc` and `classic`.
-* `io_optimized` - The instance whether I/O optimized.
-* `private_ip` - The instance private ip.
-* `public_ip` - The instance public ip.
-* `vswitch_id` - If the instance created in VPC, then this value is virtual switch ID.
-* `tags` - The instance tags, use jsonencode(item) to display the value.
diff --git a/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown b/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown
deleted file mode 100644
index 33ceb6e9b..000000000
--- a/website/source/docs/providers/alicloud/r/nat_gateway.html.markdown
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_nat_gateway"
-sidebar_current: "docs-alicloud-resource-nat-gateway"
-description: |-
- Provides a resource to create a VPC NAT Gateway.
----
-
-# alicloud\_nat\_gateway
-
-Provides a resource to create a VPC NAT Gateway.
-
-~> **NOTE:** alicloud_nat_gateway must depends on alicloud_vswitch.
-
-
-## Example Usage
-
-Basic usage
-
-```
-resource "alicloud_vpc" "vpc" {
- name = "tf_test_foo"
- cidr_block = "172.16.0.0/12"
-}
-
-resource "alicloud_vswitch" "vsw" {
- vpc_id = "${alicloud_vpc.vpc.id}"
- cidr_block = "172.16.0.0/21"
- availability_zone = "cn-beijing-b"
-}
-
-resource "alicloud_nat_gateway" "nat_gateway" {
- vpc_id = "${alicloud_vpc.vpc.id}"
- spec = "Small"
- name = "test_foo"
-
- bandwidth_packages = [{
- ip_count = 1
- bandwidth = 5
- zone = "cn-beijing-b"
- },
- {
- ip_count = 2
- bandwidth = 10
- zone = "cn-beijing-b"
- },
- ]
-
- depends_on = [
- "alicloud_vswitch.vsw",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required, Forces New Resorce) The VPC ID.
-* `spec` - (Required, Forces New Resorce) The specification of the nat gateway. Valid values are `Small`, `Middle` and `Large`. Details refer to [Nat Gateway Specification](https://help.aliyun.com/document_detail/42757.html?spm=5176.doc32322.6.559.kFNBzv)
-* `name` - (Optional) Name of the nat gateway. The value can have a string of 2 to 128 characters, must contain only alphanumeric characters or hyphens, such as "-",".","_", and must not begin or end with a hyphen, and must not begin with http:// or https://. Defaults to null.
-* `description` - (Optional) Description of the nat gateway, This description can have a string of 2 to 256 characters, It cannot begin with http:// or https://. Defaults to null.
-* `bandwidth_packages` - (Required) A list of bandwidth packages for the nat gatway.
-
-## Block bandwidth package
-
-The bandwidth package mapping supports the following:
-
-* `ip_count` - (Required) The IP number of the current bandwidth package. Its value range from 1 to 50.
-* `bandwidth` - (Required) The bandwidth value of the current bandwidth package. Its value range from 5 to 5000.
-* `zone` - (Optional) The AZ for the current bandwidth. If this value is not specified, Terraform will set a random AZ.
-* `public_ip_addresses` - (Computer) The public ip for bandwidth package. the public ip count equal `ip_count`, multi ip would complex with ",", such as "10.0.0.1,10.0.0.2".
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the nat gateway.
-* `name` - The name of the nat gateway.
-* `description` - The description of the nat gateway.
-* `spec` - The specification of the nat gateway.
-* `vpc_id` - The VPC ID for the nat gateway.
-* `bandwidth_package_ids` - A list ID of the bandwidth packages, and split them with commas
-* `snat_table_ids` - The nat gateway will auto create a snap and forward item, the `snat_table_ids` is the created one.
-* `forward_table_ids` - The nat gateway will auto create a snap and forward item, the `forward_table_ids` is the created one.
diff --git a/website/source/docs/providers/alicloud/r/security_group.html.markdown b/website/source/docs/providers/alicloud/r/security_group.html.markdown
deleted file mode 100644
index c25dbe808..000000000
--- a/website/source/docs/providers/alicloud/r/security_group.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_security_group"
-sidebar_current: "docs-alicloud-resource-security-group"
-description: |-
- Provides a Alicloud Security Group resource.
----
-
-# alicloud\_security\_group
-
-Provides a security group resource.
-
-~> **NOTE:** `alicloud_security_group` is used to build and manage a security group, and `alicloud_security_group_rule` can define ingress or egress rules for it.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_security_group" "group" {
- name = "terraform-test-group"
- description = "New security group"
-}
-```
-Basic usage for vpc
-
-```
-resource "alicloud_security_group" "group" {
- name = "new-group"
- vpc_id = "${alicloud_vpc.vpc.id}"
-}
-
-resource "alicloud_vpc" "vpc" {
- cidr_block = "10.1.0.0/21"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the security group. Defaults to null.
-* `description` - (Optional, Forces new resource) The security group description. Defaults to null.
-* `vpc_id` - (Optional, Forces new resource) The VPC ID.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group
-* `vpc_id` - The VPC ID.
-* `name` - The name of the security group
-* `description` - The description of the security group
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown b/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown
deleted file mode 100644
index feaab5b06..000000000
--- a/website/source/docs/providers/alicloud/r/security_group_rule.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_security_group_rule"
-sidebar_current: "docs-alicloud-resource-security-group-rule"
-description: |-
- Provides a Alicloud Security Group Rule resource.
----
-
-# alicloud\_security\_group\_rule
-
-Provides a security group rule resource.
-Represents a single `ingress` or `egress` group rule, which can be added to external Security Groups.
-
-~> **NOTE:** `nic_type` should set to `intranet` when security group type is `vpc`. In this situation it does not distinguish between intranet and internet, the rule is effective on them both.
-
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_security_group" "default" {
- name = "default"
-}
-
-resource "alicloud_security_group_rule" "allow_all_tcp" {
- type = "ingress"
- ip_protocol = "tcp"
- nic_type = "internet"
- policy = "accept"
- port_range = "1/65535"
- priority = 1
- security_group_id = "${alicloud_security_group.default.id}"
- cidr_ip = "0.0.0.0/0"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The type of rule being created. Valid options are `ingress` (inbound) or `egress` (outbound).
-* `ip_protocol` - (Required) The protocol. Can be `tcp`, `udp`, `icmp`, `gre` or `all`.
-* `port_range` - (Required) The range of port numbers relevant to the IP protocol. When the protocol is tcp or udp, the default port number range is 1-65535. For example, `1/200` means that the range of the port numbers is 1-200.
-* `security_group_id` - (Required) The security group to apply this rule to.
-* `nic_type` - (Optional, Forces new resource) Network type, can be either `internet` or `intranet`, the default value is `internet`.
-* `policy` - (Optional, Forces new resource) Authorization policy, can be either `accept` or `drop`, the default value is `accept`.
-* `priority` - (Optional, Forces new resource) Authorization policy priority, with parameter values: `1-100`, default value: 1.
-* `cidr_ip` - (Optional, Forces new resource) The target IP address range. The default value is 0.0.0.0/0 (which means no restriction will be applied). Other supported formats include 10.159.6.18/12. Only IPv4 is supported.
-* `source_security_group_id` - (Optional, Forces new resource) The target security group ID within the same region. Either the `source_security_group_id` or `cidr_ip` must be set. If both are set, then `cidr_ip` is authorized by default. If this field is specified, but no `cidr_ip` is specified, the `nic_type` can only select `intranet`.
-* `source_group_owner_account` - (Optional, Forces new resource) The Alibaba Cloud user account Id of the target security group when security groups are authorized across accounts. This parameter is invalid if `cidr_ip` has already been set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group rule
-* `type` - The type of rule, `ingress` or `egress`
-* `name` - The name of the security group
-* `port_range` - The range of port numbers
-* `ip_protocol` - The protocol of the security group rule
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/slb.html.markdown b/website/source/docs/providers/alicloud/r/slb.html.markdown
deleted file mode 100644
index 7feb52490..000000000
--- a/website/source/docs/providers/alicloud/r/slb.html.markdown
+++ /dev/null
@@ -1,141 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_slb"
-sidebar_current: "docs-alicloud-resource-slb"
-description: |-
- Provides an Application Load Banlancer resource.
----
-
-# alicloud\_slb
-
-Provides an Application Load Balancer resource.
-
-## Example Usage
-
-```
-# Create a new load balancer for classic
-resource "alicloud_slb" "classic" {
- name = "test-slb-tf"
- internet = true
- internet_charge_type = "paybybandwidth"
- bandwidth = 5
-
- listener = [
- {
- "instance_port" = "2111"
- "lb_port" = "21"
- "lb_protocol" = "tcp"
- "bandwidth" = "5"
- },
- {
- "instance_port" = "8000"
- "lb_port" = "80"
- "lb_protocol" = "http"
- "bandwidth" = "5"
- },
- {
- "instance_port" = "1611"
- "lb_port" = "161"
- "lb_protocol" = "udp"
- "bandwidth" = "5"
- },
- ]
-}
-
-# Create a new load balancer for VPC
-resource "alicloud_vpc" "default" {
- # Other parameters...
-}
-
-resource "alicloud_vswitch" "default" {
- # Other parameters...
-}
-
-resource "alicloud_slb" "vpc" {
- name = "test-slb-tf"
- vswitch_id = "${alicloud_vswitch.default.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the SLB. This name must be unique within your AliCloud account, can have a maximum of 80 characters,
-must contain only alphanumeric characters or hyphens, such as "-","/",".","_", and must not begin or end with a hyphen. If not specified,
-Terraform will autogenerate a name beginning with `tf-lb`.
-* `internet` - (Optional, Forces New Resource) If true, the SLB addressType will be internet, false will be intranet, Default is false. If load balancer launched in VPC, this value must be "false".
-* `internet_charge_type` - (Optional, Forces New Resource) Valid
- values are `paybybandwidth`, `paybytraffic`. If this value is "paybybandwidth", then argument "internet" must be "true". Default is "paybytraffic". If load balancer launched in VPC, this value must be "paybytraffic".
-* `bandwidth` - (Optional) Valid
- value is between 1 and 1000, If argument "internet_charge_type" is "paybytraffic", then this value will be ignore.
-* `listener` - (Optional) Additional SLB listener. See [Block listener](#block-listener) below for details.
-* `vswitch_id` - (Required for a VPC SLB, Forces New Resource) The VSwitch ID to launch in.
-
-## Block listener
-
-load balance support 4 protocal to listen on, they are `http`,`https`,`tcp`,`udp`, the every listener support which portocal following:
-
-listener parameter | support protocol | value range |
-------------- | ------------- | ------------- |
-instance_port | http & https & tcp & udp | 1-65535 |
-lb_port | http & https & tcp & udp | 1-65535 |
-lb_protocol | http & https & tcp & udp |
-bandwidth | http & https & tcp & udp | -1 / 1-1000 |
-scheduler | http & https & tcp & udp | wrr or wlc |
-sticky_session | http & https | on or off |
-sticky_session_type | http & https | insert or server |
-cookie_timeout | http & https | 1-86400 |
-cookie | http & https | |
-persistence_timeout | tcp & udp | 0-3600 |
-health_check | http & https | on or off |
-health_check_type | tcp | tcp or http |
-health_check_domain | http & https & tcp |
-health_check_uri | http & https & tcp | |
-health_check_connect_port | http & https & tcp & udp | 1-65535 or -520 |
-healthy_threshold | http & https & tcp & udp | 1-10 |
-unhealthy_threshold | http & https & tcp & udp | 1-10 |
-health_check_timeout | http & https & tcp & udp | 1-50 |
-health_check_interval | http & https & tcp & udp | 1-5 |
-health_check_http_code | http & https & tcp | http_2xx,http_3xx,http_4xx,http_5xx |
-ssl_certificate_id | https | |
-
-
-The listener mapping supports the following:
-
-* `instance_port` - (Required) The port on which the backend servers are listening. Valid value is between 1 to 65535.
-* `lb_port` - (Required) The port on which the load balancer is listening. Valid value is between 1 to 65535.
-* `lb_protocol` - (Required) The protocol to listen on. Valid values are `http` and and `tcp` and `udp`.
-* `bandwidth` - (Required) The bandwidth on which the load balancer is listening. Valid values is -1 or between 1 and 1000. If -1, the bindwidth will haven’t upper limit.
-* `scheduler` - (Optinal) Scheduling algorithm, Valid Value is `wrr` / `wlc`, Default is "wrr".
-* `sticky_session` - (Optinal) Whether to enable session persistence, Value: `on` / `off`.
-* `sticky_session_type` - (Optinal) Mode for handling the cookie. If "sticky_session" is on, the parameter is mandatory, and if "sticky_session" is off, the parameter will be ignored. Value:`insert` / `server`. If it is set to insert, it means it is inserted from Server Load Balancer; and if it is set to server, it means the Server Load Balancer learns from the backend server.
-* `cookie_timeout` - (Optinal) The parameter is mandatory when "sticky_session" is on and "sticky_session_type" is insert. Otherwise, it will be ignored. Value: 1-86400(in seconds)
-* `cookie` - (Optinal) The cookie configured on the server
-It is mandatory only when "sticky_session" is on and "sticky_session_type" is server; otherwise, the parameter will be ignored. Value:String in line with RFC 2965, with length being 1- 200. It only contains characters such as ASCII codes, English letters and digits instead of the comma, semicolon or spacing, and it cannot start with $.
-* `persistence_timeout` - (Optinal) Timeout of connection persistence. Value: 0-3600(in seconds) .Default:0 The value 0 indicates to close it.
-* `health_check` - (Optinal) Whether to enable health check. Value:`on` / `off`
-* `health_check_type` - (Optinal) Type of health check. Value:`tcp` | `http` , Default:`tcp` . TCP supports TCP and HTTP health check mode, you can select the particular mode depending on your application.
-* `health_check_domain` - (Optinal) Domain name used for health check. When TCP listener need to use HTTP health check, this parameter will be configured; and when TCP health check is used, the parameter will be ignored. Value: `$_ip | custom string`. Rules of the custom string: its length is limited to 1-80 and only characters such as letters, digits, ‘-‘ and ‘.’ are allowed. When the parameter is set to $_ip by the user, Server Load Balancer uses the private network IP address of each backend server as Domain used for health check.
-* `health_check_uri` - (Optinal) URI used for health check. When TCP listener need to use HTTP health check, this parameter will be configured; and when TCP health check is used, the parameter will be ignored.
-Value:Its length is limited to 1-80 and it must start with /. Only characters such as letters, digits, ‘-’, ‘/’, ‘.’, ‘%’, ‘?’, #’ and ‘&’ are allowed.
-* `health_check_connect_port` - (Optinal) Port used for health check. Value: `1-65535`, Default:None. When the parameter is not set, it means the backend server port is used (BackendServerPort).
-* `healthy_threshold` - (Optinal) Threshold determining the result of the health check is success. Value:`1-10`, Default:3.
-* `unhealthy_threshold` - (Optinal) Threshold determining the result of the health check is fail. Value:`1-10`, Default:3.
-* `health_check_timeout` - (Optinal) Maximum timeout of each health check response. When "health_check" is on, the parameter is mandatory; and when "mandatory" is off, the parameter will be ignored. Value:`1-50`(in seconds). Note: If health_check_timeout < health_check_interval, health_check_timeout is invalid, and the timeout is health_check_interval.
-* `health_check_interval` - (Optinal) Time interval of health checks.
-When "health_check" is on, the parameter is mandatory; and when "health_check" is off, the parameter will be ignored. Value:`1-5` (in seconds)
-* `health_check_http_code` - (Optinal) Regular health check HTTP status code. Multiple codes are segmented by “,”. When "health_check" is on, the parameter is mandatory; and when "health_check" is off, the parameter will be ignored. Value:`http_2xx` / `http_3xx` / `http_4xx` / `http_5xx`.
-* `ssl_certificate_id` - (Optinal) Security certificate ID.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the load balancer.
-* `name` - The name of the load balancer.
-* `internet` - The internet of the load balancer.
-* `internet_charge_type` - The internet_charge_type of the load balancer.
-* `bandwidth` - The bandwidth of the load balancer.
-* `vswitch_id` - The VSwitch ID of the load balancer. Only available on SLB launched in a VPC.
-* `address` - The IP address of the load balancer.
\ No newline at end of file
diff --git a/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown b/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown
deleted file mode 100644
index ce3d7ac39..000000000
--- a/website/source/docs/providers/alicloud/r/slb_attachment.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_slb_attachment"
-sidebar_current: "docs-alicloud-resource-slb-attachment"
-description: |-
- Provides an Application Load Banlancer Attachment resource.
----
-
-# alicloud\_slb\_attachment
-
-Provides an Application Load Balancer Attachment resource.
-
-## Example Usage
-
-```
-# Create a new load balancer attachment for classic
-resource "alicloud_slb" "default" {
- # Other parameters...
-}
-
-resource "alicloud_instance" "default" {
- # Other parameters...
-}
-
-resource "alicloud_slb_attachment" "default" {
- slb_id = "${alicloud_slb.default.id}"
- instances = ["${alicloud_instance.default.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `slb_id` - (Required) The ID of the SLB..
-* `instances` - (Required) A list of instance ids to added backend server in the SLB. If dettachment instances then this value set [].
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `backend_servers` - The backend servers of the load balancer.
diff --git a/website/source/docs/providers/alicloud/r/snat.html.markdown b/website/source/docs/providers/alicloud/r/snat.html.markdown
deleted file mode 100644
index f39549387..000000000
--- a/website/source/docs/providers/alicloud/r/snat.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_snat_entry"
-sidebar_current: "docs-alicloud-resource-vpc"
-description: |-
- Provides a Alicloud snat resource.
----
-
-# alicloud\_snat
-
-Provides a snat resource.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_vpc" "foo" {
- ...
-}
-
-resource "alicloud_vswitch" "foo" {
- ...
-}
-
-resource "alicloud_nat_gateway" "foo" {
- vpc_id = "${alicloud_vpc.foo.id}"
- spec = "Small"
- name = "test_foo"
-
- bandwidth_packages = [
- {
- ip_count = 2
- bandwidth = 5
- zone = ""
- },
- {
- ip_count = 1
- bandwidth = 6
- zone = "cn-beijing-b"
- }
- ]
-
- depends_on = [
- "alicloud_vswitch.foo"
- ]
-}
-
-resource "alicloud_snat_entry" "foo" {
- snat_table_id = "${alicloud_nat_gateway.foo.snat_table_ids}"
- source_vswitch_id = "${alicloud_vswitch.foo.id}"
- snat_ip = "${alicloud_nat_gateway.foo.bandwidth_packages.0.public_ip_addresses}"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `snat_table_id` - (Required, Forces new resource) The value can get from `alicloud_nat_gateway` Attributes "snat_table_ids".
-* `source_vswitch_id` - (Required, Forces new resource) The vswitch ID.
-* `snat_ip` - (Required) The SNAT ip address, the ip must along bandwidth package public ip which `alicloud_nat_gateway` argument `bandwidth_packages`.
diff --git a/website/source/docs/providers/alicloud/r/vpc.html.markdown b/website/source/docs/providers/alicloud/r/vpc.html.markdown
deleted file mode 100644
index f464326d8..000000000
--- a/website/source/docs/providers/alicloud/r/vpc.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_vpc"
-sidebar_current: "docs-alicloud-resource-vpc"
-description: |-
- Provides an Alicloud VPC resource.
----
-
-# alicloud\_vpc
-
-Provides a VPC resource.
-
-~> **NOTE:** Terraform will auto build a router and a route table while it uses `alicloud_vpc` to build a vpc resource.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_vpc" "vpc" {
- name = "tf_test_foo"
- cidr_block = "172.16.0.0/12"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `cidr_block` - (Required, Forces new resource) The CIDR block for the VPC.
-* `name` - (Optional) The name of the VPC. Defaults to null.
-* `description` - (Optional) The VPC description. Defaults to null.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC.
-* `cidr_block` - The CIDR block for the VPC.
-* `name` - The name of the VPC.
-* `description` - The description of the VPC.
-* `router_id` - The ID of the router created by default on VPC creation.
diff --git a/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown b/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown
deleted file mode 100644
index adca830be..000000000
--- a/website/source/docs/providers/alicloud/r/vroute_entry.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_route_entry"
-sidebar_current: "docs-alicloud-resource-route-entry"
-description: |-
- Provides an Alicloud Route Entry resource.
----
-
-# alicloud\_route\_entry
-
-Provides a route entry resource.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_vpc" "vpc" {
- name = "tf_test_foo"
- cidr_block = "172.16.0.0/12"
-}
-
-resource "alicloud_route_entry" "default" {
- router_id = "${alicloud_vpc.default.router_id}"
- route_table_id = "${alicloud_vpc.default.router_table_id}"
- destination_cidrblock = "${var.entry_cidr}"
- nexthop_type = "Instance"
- nexthop_id = "${alicloud_instance.snat.id}"
-}
-
-resource "alicloud_instance" "snat" {
- // ...
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `router_id` - (Required, Forces new resource) The ID of the virtual router attached to Vpc.
-* `route_table_id` - (Required, Forces new resource) The ID of the route table.
-* `destination_cidrblock` - (Required, Forces new resource) The RouteEntry's target network segment.
-* `nexthop_type` - (Required, Forces new resource) The next hop type. Available value is Instance.
-* `nexthop_id` - (Required, Forces new resource) The route entry's next hop.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `router_id` - (Required, Forces new resource) The ID of the virtual router attached to Vpc.
-* `route_table_id` - (Required, Forces new resource) The ID of the route table.
-* `destination_cidrblock` - (Required, Forces new resource) The RouteEntry's target network segment.
-* `nexthop_type` - (Required, Forces new resource) The next hop type. Available value is Instance.
-* `nexthop_id` - (Required, Forces new resource) The route entry's next hop.
diff --git a/website/source/docs/providers/alicloud/r/vswitch.html.markdown b/website/source/docs/providers/alicloud/r/vswitch.html.markdown
deleted file mode 100644
index 8f6bbdb2f..000000000
--- a/website/source/docs/providers/alicloud/r/vswitch.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "alicloud"
-page_title: "Alicloud: alicloud_vswitch"
-sidebar_current: "docs-alicloud-resource-vswitch"
-description: |-
- Provides an Alicloud VPC switch resource.
----
-
-# alicloud\_vswitch
-
-Provides a VPC switch resource.
-
-## Example Usage
-
-Basic Usage
-
-```
-resource "alicloud_vpc" "vpc" {
- name = "tf_test_foo"
- cidr_block = "172.16.0.0/12"
-}
-
-resource "alicloud_vswitch" "vsw" {
- vpc_id = "${alicloud_vpc.vpc.id}"
- cidr_block = "172.16.0.0/21"
- availability_zone = "cn-beijing-b"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `availability_zone` - (Required, Forces new resource) The AZ for the switch.
-* `vpc_id` - (Required, Forces new resource) The VPC ID.
-* `cidr_block` - (Required, Forces new resource) The CIDR block for the switch.
-* `name` - (Optional) The name of the switch. Defaults to null.
-* `description` - (Optional) The switch description. Defaults to null.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the switch.
-* `availability_zone` The AZ for the switch.
-* `cidr_block` - The CIDR block for the switch.
-* `vpc_id` - The VPC ID.
-* `name` - The name of the switch.
-* `description` - The description of the switch.
diff --git a/website/source/docs/providers/archive/d/archive_file.md b/website/source/docs/providers/archive/d/archive_file.md
deleted file mode 100644
index 6950b4efe..000000000
--- a/website/source/docs/providers/archive/d/archive_file.md
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "archive"
-page_title: "Archive: archive_file"
-sidebar_current: "docs-archive-datasource-archive-file"
-description: |-
- Generates an archive from content, a file, or directory of files.
----
-
-# archive_file
-
-Generates an archive from content, a file, or directory of files.
-
-## Example Usage
-
-```hcl
-# Archive a single file.
-
-data "archive_file" "init" {
- type = "zip"
- source_file = "${path.module}/init.tpl"
- output_path = "${path.module}/files/init.zip"
-}
-
-# Archive multiple files.
-
-data "archive_file" "dotfiles" {
- type = "zip"
- output_path = "${path.module}/files/dotfiles.zip"
-
- source {
- content = "${data.template_file.vimrc.rendered}"
- filename = ".vimrc"
- }
-
- source {
- content = "${data.template_file.ssh_config.rendered}"
- filename = ".ssh/config"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-NOTE: One of `source`, `source_content_filename` (with `source_content`), `source_file`, or `source_dir` must be specified.
-
-* `type` - (Required) The type of archive to generate.
- NOTE: `zip` is supported.
-
-* `output_path` - (Required) The output of the archive file.
-
-* `source_content` - (Optional) Add only this content to the archive with `source_content_filename` as the filename.
-
-* `source_content_filename` - (Optional) Set this as the filename when using `source_content`.
-
-* `source_file` - (Optional) Package this file into the archive.
-
-* `source_dir` - (Optional) Package entire contents of this directory into the archive.
-
-* `source` - (Optional) Specifies attributes of a single source file to include into the archive.
-
-The `source` block supports the following:
-
-* `content` - (Required) Add this content to the archive with `filename` as the filename.
-
-* `filename` - (Required) Set this as the filename when declaring a `source`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `output_size` - The size of the output archive file.
-
-* `output_sha` - The SHA1 checksum of output archive file.
-
-* `output_base64sha256` - The base64-encoded SHA256 checksum of output archive file.
-
-* `output_md5` - The MD5 checksum of output archive file.
diff --git a/website/source/docs/providers/archive/index.html.markdown b/website/source/docs/providers/archive/index.html.markdown
deleted file mode 100644
index b9787f6b6..000000000
--- a/website/source/docs/providers/archive/index.html.markdown
+++ /dev/null
@@ -1,19 +0,0 @@
----
-layout: "archive"
-page_title: "Provider: Archive"
-sidebar_current: "docs-archive-index"
-description: |-
- The Archive provider is used to manage archive files.
----
-
-# Archive Provider
-
-The archive provider exposes resources to manage archive files.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "archive" {}
-```
diff --git a/website/source/docs/providers/arukas/index.html.markdown b/website/source/docs/providers/arukas/index.html.markdown
deleted file mode 100644
index 08aa8bf82..000000000
--- a/website/source/docs/providers/arukas/index.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "arukas"
-page_title: "Provider: Arukas"
-sidebar_current: "docs-arukas-index"
-description: |-
- The Arukas provider is used to interact with the resources supported by Arukas.
----
-
-# Arukas Provider
-
-The Arukas provider is used to manage [Arukas](https://arukas.io/en/) resources.
-
-Use the navigation to the left to read about the available resources.
-
-For additional details please refer to [Arukas documentation](https://arukas.io/en/category/documents-en/).
-
-## Example Usage
-
-Here is an example that will setup the following:
-
-+ A container resource using the "NGINX" image
-+ Instance count is 1
-+ Memory size is 256Mbyte
-+ Expose tcp 80 port to the EndPoint
-+ Set environments variable with like "key1=value1"
-
-Add the below to a file called `arukas.tf` and run the `terraform` command from the same directory:
-
-```hcl
-provider "arukas" {
- token = ""
- secret = ""
-}
-
-resource "arukas_container" "foobar" {
- name = "terraform_for_arukas_test_foobar"
- image = "nginx:latest"
- instances = 1
- memory = 256
-
- ports = {
- protocol = "tcp"
- number = "80"
- }
-
- environments {
- key = "key1"
- value = "value1"
- }
-}
-```
-
-You'll need to provide your Arukas API token and secret,
-so that Terraform can connect. If you don't want to put
-credentials in your configuration file, you can leave them
-out:
-
-```hcl
-provider "arukas" {}
-```
-
-...and instead set these environment variables:
-
-- `ARUKAS_JSON_API_TOKEN` : Your Arukas API token
-- `ARUKAS_JSON_API_SECRET`: Your Arukas API secret
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `token` - (Required) This is the Arukas API token. It must be provided, but
- it can also be sourced from the `ARUKAS_JSON_API_TOKEN` environment variable.
-
-* `secret` - (Required) This is the Arukas API secret. It must be provided, but
- it can also be sourced from the `ARUKAS_JSON_API_SECRET` environment variable.
-
-* `api_url` - (Optional) Override Arukas API Root URL. Also taken from the `ARUKAS_JSON_API_URL`
- environment variable if provided.
-
-* `trace` - (Optional) The flag of Arukas API trace log. Also taken from the `ARUKAS_DEBUG`
- environment variable if provided.
-
-* `timeout` - (Optional) Override Arukas API timeout seconds. Also taken from the `ARUKAS_TIMEOUT`
- environment variable if provided.
diff --git a/website/source/docs/providers/arukas/r/container.html.markdown b/website/source/docs/providers/arukas/r/container.html.markdown
deleted file mode 100644
index 81bdf1c28..000000000
--- a/website/source/docs/providers/arukas/r/container.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "arukas"
-page_title: "Arukas: container"
-sidebar_current: "docs-arukas-resource-container"
-description: |-
- Manages Arukas Containers
----
-
-# arukas_container
-
-Provides container resource. This allows container to be created, updated and deleted.
-
-For additional details please refer to [API documentation](https://arukas.io/en/documents-en/arukas-api-reference-en/#containers).
-
-## Example Usage
-
-Create a new container using the "NGINX" image.
-
-```hcl
-resource "arukas_container" "foobar" {
- name = "terraform_for_arukas_test_foobar"
- image = "nginx:latest"
- instances = 1
- memory = 256
-
- ports = {
- protocol = "tcp"
- number = "80"
- }
-
- environments {
- key = "key1"
- value = "value1"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the container.
-* `image` - (Required, string) The ID of the image to back this container.It must be a public image on DockerHub.
-* `instances` - (Optional, int) The count of the instance. It must be between `1` and `10`.
-* `memory` - (Optional, int) The size of the instance RAM.It must be `256` or `512`.
-* `endpoint` - (Optional,string) The subdomain part of the endpoint assigned by Arukas. If it is not set, Arukas will do automatic assignment.
-* `ports` - (Required , block) See [Ports](#ports) below for details.
-* `environments` - (Required , block) See [Environments](#environments) below for details.
-* `cmd` - (Optional , string) The command of the container.
-
-
-### Ports
-
-`ports` is a block within the configuration that can be repeated to specify
-the port mappings of the container. Each `ports` block supports
-the following:
-
-* `protocol` - (Optional, string) Protocol that can be used over this port, defaults to `tcp`,It must be `tcp` or `udp`.
-* `number` - (Optional, int) Port within the container,defaults to `80`, It must be between `1` to `65535`.
-
-
-### Environments
-
-`environments` is a block within the configuration that can be repeated to specify
-the environment variables. Each `environments` block supports
-the following:
-
-* `key` - (Required, string) Key of environment variable.
-* `value` - (Required, string) Value of environment variable.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the container.
-* `app_id` - The ID of the Arukas application to which the container belongs.
-* `name` - The name of the container.
-* `image` - The ID of the image to back this container.
-* `instances` - The count of the instance.
-* `memory` - The size of the instance RAM.
-* `endpoint` - The subdomain part of the endpoint assigned by Arukas.
-* `ports` - See [Ports](#ports) below for details.
-* `environments` - See [Environments](#environments) below for details.
-* `cmd` - The command of the container.
-* `port_mappings` - See [PortMappings](#port_mappings) below for details.
-* `endpoint_full_url` - The URL of endpoint.
-* `endpoint_full_hostname` - The Hostname of endpoint.
-
-
-### PortMappings
-
-`port_mappings` is a block within the configuration that
-the port mappings of the container. Each `port_mappings` block supports
-the following:
-
-* `host` - The name of the host actually running the container.
-* `ipaddress` - The IP address of the host actually running the container.
-* `container_port` - Port within the container.
-* `service_port` - The actual port mapped to the port in the container.
diff --git a/website/source/docs/providers/aws/d/acm_certificate.html.markdown b/website/source/docs/providers/aws/d/acm_certificate.html.markdown
deleted file mode 100644
index c3dac5ff1..000000000
--- a/website/source/docs/providers/aws/d/acm_certificate.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_acm_certificate"
-sidebar_current: "docs-aws-datasource-acm-certificate"
-description: |-
- Get information on an AWS Certificate Manager (ACM) Certificate
----
-
-# aws\_acm\_certificate
-
-Use this data source to get the ARN of a certificate in AWS Certificate
-Manager (ACM). The process of requesting and verifying a certificate in ACM
-requires some manual steps, which means that Terraform cannot automate the
-creation of ACM certificates. But using this data source, you can reference
-them by domain without having to hard code the ARNs as input.
-
-## Example Usage
-
-```hcl
-data "aws_acm_certificate" "example" {
- domain = "tf.example.com"
- statuses = ["ISSUED"]
-}
-```
-
-## Argument Reference
-
- * `domain` - (Required) The domain of the certificate to look up. If no certificate is found with this name, an error will be returned.
- * `statuses` - (Optional) A list of statuses on which to filter the returned list. Valid values are `PENDING_VALIDATION`, `ISSUED`,
- `INACTIVE`, `EXPIRED`, `VALIDATION_TIMED_OUT`, `REVOKED` and `FAILED`. If no value is specified, only certificates in the `ISSUED` state
- are returned.
-
-## Attributes Reference
-
- * `arn` - Set to the ARN of the found certificate, suitable for referencing in other resources that support ACM certificates.
diff --git a/website/source/docs/providers/aws/d/alb.html.markdown b/website/source/docs/providers/aws/d/alb.html.markdown
deleted file mode 100644
index 32107f202..000000000
--- a/website/source/docs/providers/aws/d/alb.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb"
-sidebar_current: "docs-aws-datasource-alb-x"
-description: |-
- Provides an Application Load Balancer data source.
----
-
-# aws\_alb
-
-Provides information about an Application Load Balancer.
-
-This data source can prove useful when a module accepts an ALB as an input
-variable and needs to, for example, determine the security groups associated
-with it, etc.
-
-## Example Usage
-
-```hcl
-variable "alb_arn" {
- type = "string"
- default = ""
-}
-
-variable "alb_name" {
- type = "string"
- default = ""
-}
-
-data "aws_alb" "test" {
- arn = "${var.alb_arn}"
- name = "${var.alb_name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `arn` - (Optional) The full ARN of the load balancer.
-* `name` - (Optional) The unique name of the load balancer.
-
-~> **NOTE**: When both `arn` and `name` are specified, `arn` takes precedence.
-
-## Attributes Reference
-
-See the [ALB Resource](/docs/providers/aws/r/alb.html) for details on the
-returned attributes - they are identical.
diff --git a/website/source/docs/providers/aws/d/alb_listener.html.markdown b/website/source/docs/providers/aws/d/alb_listener.html.markdown
deleted file mode 100644
index 8237a847c..000000000
--- a/website/source/docs/providers/aws/d/alb_listener.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb_listener"
-sidebar_current: "docs-aws-datasource-alb-listener"
-description: |-
- Provides an Application Load Balancer Listener data source.
----
-
-# aws\_alb\_listener
-
-Provides information about an Application Load Balancer Listener.
-
-This data source can prove useful when a module accepts an ALB Listener as an
-input variable and needs to know the ALB it is attached to, or other
-information specific to the listener in question.
-
-## Example Usage
-
-```hcl
-variable "listener_arn" {
- type = "string"
-}
-
-data "aws_alb_listener" "listener" {
- arn = "${var.listener_arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `arn` - (Required) The ARN of the listener.
-
-## Attributes Reference
-
-See the [ALB Listener Resource](/docs/providers/aws/r/alb_listener.html) for details
-on the returned attributes - they are identical.
diff --git a/website/source/docs/providers/aws/d/ami.html.markdown b/website/source/docs/providers/aws/d/ami.html.markdown
deleted file mode 100644
index a91c5dbc2..000000000
--- a/website/source/docs/providers/aws/d/ami.html.markdown
+++ /dev/null
@@ -1,123 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami"
-sidebar_current: "docs-aws-datasource-ami"
-description: |-
- Get information on an Amazon Machine Image (AMI).
----
-
-# aws\_ami
-
-Use this data source to get the ID of a registered AMI for use in other
-resources.
-
-## Example Usage
-
-```hcl
-data "aws_ami" "nat_ami" {
- most_recent = true
- executable_users = ["self"]
-
- filter {
- name = "owner-alias"
- values = ["amazon"]
- }
-
- filter {
- name = "name"
- values = ["amzn-ami-vpc-nat*"]
- }
-
- name_regex = "^myami-\\d{3}"
- owners = ["self"]
-}
-```
-
-## Argument Reference
-
-* `most_recent` - (Optional) If more than one result is returned, use the most
-recent AMI.
-
-* `executable_users` - (Optional) Limit search to users with *explicit* launch permission on
- the image. Valid items are the numeric account ID or `self`.
-
-* `filter` - (Optional) One or more name/value pairs to filter off of. There are
-several valid keys, for a full reference, check out
-[describe-images in the AWS CLI reference][1].
-
-* `owners` - (Optional) Limit search to specific AMI owners. Valid items are the numeric
-account ID, `amazon`, or `self`.
-
-* `name_regex` - (Optional) A regex string to apply to the AMI list returned
-by AWS. This allows more advanced filtering not supported from the AWS API. This
-filtering is done locally on what AWS returns, and could have a performance
-impact if the result is large. It is recommended to combine this with other
-options to narrow down the list AWS returns.
-
-~> **NOTE:** At least one of `executable_users`, `filter`, `owners`, or
-`name_regex` must be specified.
-
-~> **NOTE:** If more or less than a single match is returned by the search,
-Terraform will fail. Ensure that your search is specific enough to return
-a single AMI ID only, or use `most_recent` to choose the most recent one. If
-you want to match multiple AMIs, use the `aws_ami_ids` data source instead.
-
-## Attributes Reference
-
-`id` is set to the ID of the found AMI. In addition, the following attributes
-are exported:
-
-~> **NOTE:** Some values are not always set and may not be available for
-interpolation.
-
-* `architecture` - The OS architecture of the AMI (ie: `i386` or `x86_64`).
-* `block_device_mappings` - The block device mappings of the AMI.
- * `block_device_mappings.#.device_name` - The physical name of the device.
- * `block_device_mappings.#.ebs.delete_on_termination` - `true` if the EBS volume
- will be deleted on termination.
- * `block_device_mappings.#.ebs.encrypted` - `true` if the EBS volume
- is encrypted.
- * `block_device_mappings.#.ebs.iops` - `0` if the EBS volume is
- not a provisioned IOPS image, otherwise the supported IOPS count.
- * `block_device_mappings.#.ebs.snapshot_id` - The ID of the snapshot.
- * `block_device_mappings.#.ebs.volume_size` - The size of the volume, in GiB.
- * `block_device_mappings.#.ebs.volume_type` - The volume type.
- * `block_device_mappings.#.no_device` - Suppresses the specified device
- included in the block device mapping of the AMI.
- * `block_device_mappings.#.virtual_name` - The virtual device name (for
- instance stores).
-* `creation_date` - The date and time the image was created.
-* `description` - The description of the AMI that was provided during image
- creation.
-* `hypervisor` - The hypervisor type of the image.
-* `image_id` - The ID of the AMI. Should be the same as the resource `id`.
-* `image_location` - The location of the AMI.
-* `image_owner_alias` - The AWS account alias (for example, `amazon`, `self`) or
- the AWS account ID of the AMI owner.
-* `image_type` - The type of image.
-* `kernel_id` - The kernel associated with the image, if any. Only applicable
- for machine images.
-* `name` - The name of the AMI that was provided during image creation.
-* `owner_id` - The AWS account ID of the image owner.
-* `platform` - The value is Windows for `Windows` AMIs; otherwise blank.
-* `product_codes` - Any product codes associated with the AMI.
- * `product_codes.#.product_code_id` - The product code.
- * `product_codes.#.product_code_type` - The type of product code.
-* `public` - `true` if the image has public launch permissions.
-* `ramdisk_id` - The RAM disk associated with the image, if any. Only applicable
- for machine images.
-* `root_device_name` - The device name of the root device.
-* `root_device_type` - The type of root device (ie: `ebs` or `instance-store`).
-* `sriov_net_support` - Specifies whether enhanced networking is enabled.
-* `state` - The current state of the AMI. If the state is `available`, the image
- is successfully registered and can be used to launch an instance.
-* `state_reason` - Describes a state change. Fields are `UNSET` if not available.
- * `state_reason.code` - The reason code for the state change.
- * `state_reason.message` - The message for the state change.
-* `tags` - Any tags assigned to the image.
- * `tags.#.key` - The key name of the tag.
- * `tags.#.value` - The value of the tag.
-* `virtualization_type` - The type of virtualization of the AMI (ie: `hvm` or
- `paravirtual`).
-
-[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html
diff --git a/website/source/docs/providers/aws/d/ami_ids.html.markdown b/website/source/docs/providers/aws/d/ami_ids.html.markdown
deleted file mode 100644
index 8bad15bee..000000000
--- a/website/source/docs/providers/aws/d/ami_ids.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami_ids"
-sidebar_current: "docs-aws-datasource-ami-ids"
-description: |-
- Provides a list of AMI IDs.
----
-
-# aws\_ami_ids
-
-Use this data source to get a list of AMI IDs matching the specified criteria.
-
-## Example Usage
-
-```hcl
-data "aws_ami_ids" "ubuntu" {
- owners = ["099720109477"]
-
- filter {
- name = "name"
- values = ["ubuntu/images/ubuntu-*-*-amd64-server-*"]
- }
-}
-```
-
-## Argument Reference
-
-* `executable_users` - (Optional) Limit search to users with *explicit* launch
-permission on the image. Valid items are the numeric account ID or `self`.
-
-* `filter` - (Optional) One or more name/value pairs to filter off of. There
-are several valid keys, for a full reference, check out
-[describe-images in the AWS CLI reference][1].
-
-* `owners` - (Optional) Limit search to specific AMI owners. Valid items are
-the numeric account ID, `amazon`, or `self`.
-
-* `name_regex` - (Optional) A regex string to apply to the AMI list returned
-by AWS. This allows more advanced filtering not supported from the AWS API.
-This filtering is done locally on what AWS returns, and could have a performance
-impact if the result is large. It is recommended to combine this with other
-options to narrow down the list AWS returns.
-
-~> **NOTE:** At least one of `executable_users`, `filter`, `owners` or
-`name_regex` must be specified.
-
-## Attributes Reference
-
-`ids` is set to the list of AMI IDs, sorted by creation time in descending
-order.
-
-[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-images.html
diff --git a/website/source/docs/providers/aws/d/autoscaling_groups.html.markdown b/website/source/docs/providers/aws/d/autoscaling_groups.html.markdown
deleted file mode 100644
index 28faf2766..000000000
--- a/website/source/docs/providers/aws/d/autoscaling_groups.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_autoscaling_groups"
-sidebar_current: "docs-aws-datasource-autoscaling-groups"
-description: |-
- Provides a list of Autoscaling Groups within a specific region.
----
-
-# aws\_autoscaling\_groups
-
-The Autoscaling Groups data source allows access to the list of AWS
-ASGs within a specific region. This will allow you to pass a list of AutoScaling Groups to other resources.
-
-## Example Usage
-
-```hcl
-data "aws_autoscaling_groups" "groups" {
- filter {
- name = "key"
- values = ["Team"]
- }
-
- filter {
- name = "value"
- values = ["Pets"]
- }
-}
-
-resource "aws_autoscaling_notification" "slack_notifications" {
- group_names = ["${data.aws_autoscaling_groups.groups.names}"]
-
- notifications = [
- "autoscaling:EC2_INSTANCE_LAUNCH",
- "autoscaling:EC2_INSTANCE_TERMINATE",
- "autoscaling:EC2_INSTANCE_LAUNCH_ERROR",
- "autoscaling:EC2_INSTANCE_TERMINATE_ERROR",
- ]
-
- topic_arn = "TOPIC ARN"
-}
-```
-
-## Argument Reference
-
-* `filter` - (Optional) A filter used to scope the list e.g. by tags. See [related docs](http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_Filter.html).
- * `name` - (Required) The name of the filter. The valid values are: `auto-scaling-group`, `key`, `value`, and `propagate-at-launch`.
- * `values` - (Required) The value of the filter.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `names` - A list of the Autoscaling Groups in the current region.
diff --git a/website/source/docs/providers/aws/d/availability_zone.html.markdown b/website/source/docs/providers/aws/d/availability_zone.html.markdown
deleted file mode 100644
index 00278f089..000000000
--- a/website/source/docs/providers/aws/d/availability_zone.html.markdown
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_availability_zone"
-sidebar_current: "docs-aws-datasource-availability-zone"
-description: |-
- Provides details about a specific availability zone
----
-
-# aws\_availability\_zone
-
-`aws_availability_zone` provides details about a specific availability zone (AZ)
-in the current region.
-
-This can be used both to validate an availability zone given in a variable
-and to split the AZ name into its component parts of an AWS region and an
-AZ identifier letter. The latter may be useful e.g. for implementing a
-consistent subnet numbering scheme across several regions by mapping both
-the region and the subnet letter to network numbers.
-
-This is different from the `aws_availability_zones` (plural) data source,
-which provides a list of the available zones.
-
-## Example Usage
-
-The following example shows how this data source might be used to derive
-VPC and subnet CIDR prefixes systematically for an availability zone.
-
-```hcl
-variable "region_number" {
- # Arbitrary mapping of region name to number to use in
- # a VPC's CIDR prefix.
- default = {
- us-east-1 = 1
- us-west-1 = 2
- us-west-2 = 3
- eu-central-1 = 4
- ap-northeast-1 = 5
- }
-}
-
-variable "az_number" {
- # Assign a number to each AZ letter used in our configuration
- default = {
- a = 1
- b = 2
- c = 3
- d = 4
- e = 5
- f = 6
- }
-}
-
-# Retrieve the AZ where we want to create network resources
-# This must be in the region selected on the AWS provider.
-data "aws_availability_zone" "example" {
- name = "eu-central-1a"
-}
-
-# Create a VPC for the region associated with the AZ
-resource "aws_vpc" "example" {
- cidr_block = "${cidrsubnet("10.0.0.0/8", 4, var.region_number[data.aws_availability_zone.example.region])}"
-}
-
-# Create a subnet for the AZ within the regional VPC
-resource "aws_subnet" "example" {
- vpc_id = "${aws_vpc.example.id}"
- cidr_block = "${cidrsubnet(aws_vpc.example.cidr_block, 4, var.az_number[data.aws_availability_zone.example.name_suffix])}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available
-availability zones. The given filters must match exactly one availability
-zone whose data will be exported as attributes.
-
-* `name` - (Optional) The full name of the availability zone to select.
-
-* `state` - (Optional) A specific availability zone state to require. May
- be any of `"available"`, `"information"`, `"impaired"` or `"unavailable"`.
-
-All reasonable uses of this data source will specify `name`, since `state`
-alone would match a single AZ only in a region that itself has only one AZ.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the selected availability zone.
-
-* `region` - The region where the selected availability zone resides.
- This is always the region selected on the provider, since this data source
- searches only within that region.
-
-* `name_suffix` - The part of the AZ name that appears after the region name,
- uniquely identifying the AZ within its region.
-
-* `state` - The current state of the AZ.
diff --git a/website/source/docs/providers/aws/d/availability_zones.html.markdown b/website/source/docs/providers/aws/d/availability_zones.html.markdown
deleted file mode 100644
index 648c144e5..000000000
--- a/website/source/docs/providers/aws/d/availability_zones.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_availability_zones"
-sidebar_current: "docs-aws-datasource-availability-zones"
-description: |-
- Provides a list of Availability Zones which can be used by an AWS account.
----
-
-# aws\_availability\_zones
-
-The Availability Zones data source allows access to the list of AWS
-Availability Zones which can be accessed by an AWS account within the region
-configured in the provider.
-
-This is different from the `aws_availability_zone` (singular) data source,
-which provides some details about a specific availability zone.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "aws_availability_zones" "available" {}
-
-# e.g. Create subnets in the first two available availability zones
-
-resource "aws_subnet" "primary" {
- availability_zone = "${data.aws_availability_zones.available.names[0]}"
-
- # ...
-}
-
-resource "aws_subnet" "secondary" {
- availability_zone = "${data.aws_availability_zones.available.names[1]}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `state` - (Optional) Allows filtering the list of Availability Zones based on their
-current state. Can be either `"available"`, `"information"`, `"impaired"` or
-`"unavailable"`. By default the list includes a complete set of Availability Zones
-to which the underlying AWS account has access, regardless of their state.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `names` - A list of the Availability Zone names available to the account.
diff --git a/website/source/docs/providers/aws/d/billing_service_account.html.markdown b/website/source/docs/providers/aws/d/billing_service_account.html.markdown
deleted file mode 100644
index aa6da6550..000000000
--- a/website/source/docs/providers/aws/d/billing_service_account.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_billing_service_account"
-sidebar_current: "docs-aws-datasource-billing-service-account"
-description: |-
- Get AWS Billing Service Account
----
-
-# aws\_billing\_service\_account
-
-Use this data source to get the Account ID of the [AWS Billing and Cost Management Service Account](http://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/billing-getting-started.html#step-2) for the purpose of whitelisting in S3 bucket policy.
-
-## Example Usage
-
-```hcl
-data "aws_billing_service_account" "main" {}
-
-resource "aws_s3_bucket" "billing_logs" {
- bucket = "my-billing-tf-test-bucket"
- acl = "private"
-
- policy = < **NOTE:** If more or less than a single match is returned by the search,
-Terraform will fail. Ensure that your search is specific enough to return
-a single solution stack, or use `most_recent` to choose the most recent one.
-
-## Attributes Reference
-
-* `name` - The name of the solution stack.
diff --git a/website/source/docs/providers/aws/d/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/d/elasticache_cluster.html.markdown
deleted file mode 100644
index 576f2102f..000000000
--- a/website/source/docs/providers/aws/d/elasticache_cluster.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_cluster"
-sidebar_current: "docs-aws-datasource-elasticache-cluster"
-description: |-
- Get information on an ElastiCache Cluster resource.
----
-
-# aws_elasticache_cluster
-
-Use this data source to get information about an Elasticache Cluster
-
-## Example Usage
-
-```hcl
-data "aws_elasticache_cluster" "my_cluster" {
- cluster_id = "my-cluster-id"
-}
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cluster_id` – (Required) Group identifier.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `node_type` – The cluster node type.
-* `num_cache_nodes` – The number of cache nodes that the cache cluster has.
-* `engine` – Name of the cache engine.
-* `engine_version` – Version number of the cache engine.
-* `subnet_group_name` – Name of the subnet group associated to the cache cluster.
-* `security_group_names` – List of security group names associated with this cache cluster.
-* `security_group_ids` – List VPC security groups associated with the cache cluster.
-* `parameter_group_name` – Name of the parameter group associated with this cache cluster.
-* `replication_group_id` - The replication group to which this cache cluster belongs.
-* `maintenance_window` – Specifies the weekly time range for when maintenance
-on the cache cluster is performed.
-* `snapshot_window` - The daily time range (in UTC) during which ElastiCache will
-begin taking a daily snapshot of the cache cluster.
-* `snapshot_retention_limit` - The number of days for which ElastiCache will
-retain automatic cache cluster snapshots before deleting them.
-* `availability_zone` - The Availability Zone for the cache cluster.
-* `notification_topic_arn` – An Amazon Resource Name (ARN) of an
-SNS topic that ElastiCache notifications get sent to.
-* `port` – The port number on which each of the cache nodes will
-accept connections.
-* `configuration_endpoint` - The configuration endpoint to allow host discovery.
-* `cluster_address` - The DNS name of the cache cluster without the port appended.
-* `cache_nodes` - List of node objects including `id`, `address`, `port` and `availability_zone`.
- Referenceable e.g. as `${data.aws_elasticache_cluster.bar.cache_nodes.0.address}`
-* `tags` - The tags assigned to the resource
diff --git a/website/source/docs/providers/aws/d/elb_hosted_zone_id.html.markdown b/website/source/docs/providers/aws/d/elb_hosted_zone_id.html.markdown
deleted file mode 100644
index ad2db7ca5..000000000
--- a/website/source/docs/providers/aws/d/elb_hosted_zone_id.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elb_hosted_zone_id"
-sidebar_current: "docs-aws-datasource-elb-hosted-zone-id"
-description: |-
- Get AWS Elastic Load Balancing Hosted Zone Id
----
-
-# aws\_elb\_hosted\_zone\_id
-
-Use this data source to get the HostedZoneId of the AWS Elastic Load Balancing service
-in a given region for the purpose of using in an AWS Route53 Alias.
-
-## Example Usage
-
-```hcl
-data "aws_elb_hosted_zone_id" "main" {}
-
-resource "aws_route53_record" "www" {
- zone_id = "${aws_route53_zone.primary.zone_id}"
- name = "example.com"
- type = "A"
-
- alias {
- name = "${aws_elb.main.dns_name}"
- zone_id = "${data.aws_elb_hosted_zone_id.main.id}"
- evaluate_target_health = true
- }
-}
-```
-
-## Argument Reference
-
-* `region` - (Optional) Name of the region whose AWS ELB HostedZoneId is desired.
- Defaults to the region from the AWS provider configuration.
-
-
-## Attributes Reference
-
-* `id` - The ID of the AWS ELB HostedZoneId in the selected region.
diff --git a/website/source/docs/providers/aws/d/elb_service_account.html.markdown b/website/source/docs/providers/aws/d/elb_service_account.html.markdown
deleted file mode 100644
index ca0d30304..000000000
--- a/website/source/docs/providers/aws/d/elb_service_account.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elb_service_account"
-sidebar_current: "docs-aws-datasource-elb-service-account"
-description: |-
- Get AWS Elastic Load Balancing Service Account
----
-
-# aws\_elb\_service\_account
-
-Use this data source to get the Account ID of the [AWS Elastic Load Balancing Service Account](http://docs.aws.amazon.com/elasticloadbalancing/latest/classic/enable-access-logs.html#attach-bucket-policy)
-in a given region for the purpose of whitelisting in S3 bucket policy.
-
-## Example Usage
-
-```hcl
-data "aws_elb_service_account" "main" {}
-
-resource "aws_s3_bucket" "elb_logs" {
- bucket = "my-elb-tf-test-bucket"
- acl = "private"
-
- policy = < **NOTE:** At least one of `filter`, `instance_tags`, or `instance_id` must be specified.
-
-~> **NOTE:** If anything other than a single match is returned by the search,
-Terraform will fail. Ensure that your search is specific enough to return
-a single Instance ID only.
-
-## Attributes Reference
-
-`id` is set to the ID of the found Instance. In addition, the following attributes
-are exported:
-
-~> **NOTE:** Some values are not always set and may not be available for
-interpolation.
-
-* `associate_public_ip_address` - Whether or not the Instance is associated with a public IP address or not (Boolean).
-* `availability_zone` - The availability zone of the Instance.
-* `ebs_block_device` - The EBS block device mappings of the Instance.
- * `delete_on_termination` - If the EBS volume will be deleted on termination.
- * `device_name` - The physical name of the device.
- * `encrypted` - If the EBS volume is encrypted.
- * `iops` - `0` If the EBS volume is not a provisioned IOPS image, otherwise the supported IOPS count.
- * `snapshot_id` - The ID of the snapshot.
- * `volume_size` - The size of the volume, in GiB.
- * `volume_type` - The volume type.
-* `ebs_optimized` - Whether the Instance is EBS optimized or not (Boolean).
-* `ephemeral_block_device` - The ephemeral block device mappings of the Instance.
- * `device_name` - The physical name of the device.
- * `no_device` - Whether the specified device included in the device mapping was suppressed or not (Boolean).
- * `virtual_name` - The virtual device name.
-* `iam_instance_profile` - The name of the instance profile associated with the Instance.
-* `ipv6_addresses` - The IPv6 addresses associated to the Instance, if applicable. **NOTE**: Unlike the IPv4 address, this doesn't change if you attach an EIP to the instance.
-* `instance_type` - The type of the Instance.
-* `key_name` - The key name of the Instance.
-* `monitoring` - Whether detailed monitoring is enabled or disabled for the Instance (Boolean).
-* `network_interface_id` - The ID of the network interface that was created with the Instance.
-* `placement_group` - The placement group of the Instance.
-* `private_dns` - The private DNS name assigned to the Instance. Can only be
- used inside the Amazon EC2, and only available if you've enabled DNS hostnames
- for your VPC.
-* `private_ip` - The private IP address assigned to the Instance.
-* `public_dns` - The public DNS name assigned to the Instance. For EC2-VPC, this
- is only available if you've enabled DNS hostnames for your VPC.
-* `public_ip` - The public IP address assigned to the Instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip`, as this field will change after the EIP is attached.
-* `root_block_device` - The root block device mappings of the Instance
- * `delete_on_termination` - If the root block device will be deleted on termination.
- * `iops` - `0` If the volume is not a provisioned IOPS image, otherwise the supported IOPS count.
- * `volume_size` - The size of the volume, in GiB.
- * `volume_type` - The type of the volume.
-* `security_groups` - The associated security groups.
-* `source_dest_check` - Whether the network interface performs source/destination checking (Boolean).
-* `subnet_id` - The VPC subnet ID.
-* `user_data` - The User Data supplied to the Instance.
-* `tags` - A mapping of tags assigned to the Instance.
-* `tenancy` - The tenancy of the instance: `dedicated`, `default`, `host`.
-* `vpc_security_group_ids` - The associated security groups in a non-default VPC.
-
-[1]: http://docs.aws.amazon.com/cli/latest/reference/ec2/describe-instances.html
diff --git a/website/source/docs/providers/aws/d/ip_ranges.html.markdown b/website/source/docs/providers/aws/d/ip_ranges.html.markdown
deleted file mode 100644
index 7d9d57a7a..000000000
--- a/website/source/docs/providers/aws/d/ip_ranges.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ip_ranges"
-sidebar_current: "docs-aws-datasource-ip_ranges"
-description: |-
- Get information on AWS IP ranges.
----
-
-# aws\_ip_ranges
-
-Use this data source to get the [IP ranges][1] of various AWS products and services.
-
-## Example Usage
-
-```hcl
-data "aws_ip_ranges" "european_ec2" {
- regions = ["eu-west-1", "eu-central-1"]
- services = ["ec2"]
-}
-
-resource "aws_security_group" "from_europe" {
- name = "from_europe"
-
- ingress {
- from_port = "443"
- to_port = "443"
- protocol = "tcp"
- cidr_blocks = ["${data.aws_ip_ranges.european_ec2.cidr_blocks}"]
- }
-
- tags {
- CreateDate = "${data.aws_ip_ranges.european_ec2.create_date}"
- SyncToken = "${data.aws_ip_ranges.european_ec2.sync_token}"
- }
-}
-```
-
-## Argument Reference
-
-* `regions` - (Optional) Filter IP ranges by regions (or include all regions, if
-omitted). Valid items are `global` (for `cloudfront`) as well as all AWS regions
-(e.g. `eu-central-1`)
-
-* `services` - (Required) Filter IP ranges by services. Valid items are `amazon`
-(for amazon.com), `cloudfront`, `ec2`, `route53`, `route53_healthchecks` and `S3`.
-
-~> **NOTE:** If the specified combination of regions and services does not yield any
-CIDR blocks, Terraform will fail.
-
-## Attributes Reference
-
-* `cidr_blocks` - The lexically ordered list of CIDR blocks.
-* `create_date` - The publication time of the IP ranges (e.g. `2016-08-03-23-46-05`).
-* `sync_token` - The publication time of the IP ranges, in Unix epoch time format
- (e.g. `1470267965`).
-
-[1]: http://docs.aws.amazon.com/general/latest/gr/aws-ip-ranges.html
diff --git a/website/source/docs/providers/aws/d/kinesis_stream.html.markdown b/website/source/docs/providers/aws/d/kinesis_stream.html.markdown
deleted file mode 100644
index 1784eca18..000000000
--- a/website/source/docs/providers/aws/d/kinesis_stream.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kinesis_stream"
-sidebar_current: "docs-aws-datasource-kinesis-stream"
-description: |-
- Provides a Kinesis Stream data source.
----
-
-# aws\_kinesis\_stream
-
-Use this data source to get information about a Kinesis Stream for use in other
-resources.
-
-For more details, see the [Amazon Kinesis Documentation][1].
-
-## Example Usage
-
-```
-data "aws_kinesis_stream" "stream" {
- name = "stream-name"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the Kinesis Stream.
-
-## Attributes Reference
-
-`id` is set to the Amazon Resource Name (ARN) of the Kinesis Stream. In addition, the following attributes
-are exported:
-
-* `arn` - The Amazon Resource Name (ARN) of the Kinesis Stream (same as id).
-* `name` - The name of the Kinesis Stream.
-* `creation_timestamp` - The approximate UNIX timestamp that the stream was created.
-* `status` - The current status of the stream. The stream status is one of CREATING, DELETING, ACTIVE, or UPDATING.
-* `retention_period` - Length of time (in hours) data records are accessible after they are added to the stream.
-* `open_shards` - The list of shard ids in the OPEN state. See [Shard State][2] for more.
-* `closed_shards` - The list of shard ids in the CLOSED state. See [Shard State][2] for more.
-* `shard_level_metrics` - A list of shard-level CloudWatch metrics which are enabled for the stream. See [Monitoring with CloudWatch][3] for more.
-* `tags` - A mapping of tags assigned to the stream.
-
-[1]: https://aws.amazon.com/documentation/kinesis/
-[2]: https://docs.aws.amazon.com/streams/latest/dev/kinesis-using-sdk-java-after-resharding.html#kinesis-using-sdk-java-resharding-data-routing
-[3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/d/kms_alias.html.markdown b/website/source/docs/providers/aws/d/kms_alias.html.markdown
deleted file mode 100644
index b37e77488..000000000
--- a/website/source/docs/providers/aws/d/kms_alias.html.markdown
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kms_alias"
-sidebar_current: "docs-aws-datasource-kms-alias"
-description: |-
- Get information on an AWS Key Management Service (KMS) Alias
----
-
-# aws\_kms\_alias
-
-Use this data source to get the ARN of a KMS key alias.
-By using this data source, you can reference key alias
-without having to hard code the ARN as input.
-
-## Example Usage
-
-```hcl
-data "aws_kms_alias" "s3" {
- name = "alias/aws/s3"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/)
-
-## Attributes Reference
-
-* `arn` - The Amazon Resource Name(ARN) of the key alias.
-* `target_key_id` - Key identifier pointed to by the alias.
diff --git a/website/source/docs/providers/aws/d/kms_ciphertext.html.markdown b/website/source/docs/providers/aws/d/kms_ciphertext.html.markdown
deleted file mode 100644
index f27a5af47..000000000
--- a/website/source/docs/providers/aws/d/kms_ciphertext.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kms_ciphertext"
-sidebar_current: "docs-aws-datasource-kms-ciphertext"
-description: |-
- Provides ciphertext encrypted using a KMS key
----
-
-# aws\_kms\_ciphertext
-
-The KMS ciphertext data source allows you to encrypt plaintext into ciphertext
-by using an AWS KMS customer master key.
-
-~> **Note:** All arguments including the plaintext be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_kms_key" "oauth_config" {
- description = "oauth config"
- is_enabled = true
-}
-
-data "aws_kms_ciphertext" "oauth" {
- key_id = "${aws_kms_key.oauth_config.key_id}"
- plaintext = < **NOTE**: Using this data provider will allow you to conceal secret data within your
-resource definitions but does not take care of protecting that data in the
-logging output, plan output or state output.
-
-Please take care to secure your secret data outside of resource definitions.
-
-## Example Usage
-
-First, let's encrypt a password with KMS using the [AWS CLI
-tools](http://docs.aws.amazon.com/cli/latest/reference/kms/encrypt.html). This
-requires you to have your AWS CLI setup correctly, and you would replace the
-key-id with your own.
-
-```
-$ echo 'master-password' > plaintext-password
-$ aws kms encrypt \
-> --key-id ab123456-c012-4567-890a-deadbeef123 \
-> --plaintext fileb://plaintext-password \
-> --encryption-context foo=bar \
-> --output text --query CiphertextBlob
-AQECAHgaPa0J8WadplGCqqVAr4HNvDaFSQ+NaiwIBhmm6qDSFwAAAGIwYAYJKoZIhvcNAQcGoFMwUQIBADBMBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDI+LoLdvYv8l41OhAAIBEIAfx49FFJCLeYrkfMfAw6XlnxP23MmDBdqP8dPp28OoAQ==
-```
-
-Now, take that output and add it to your resource definitions.
-
-```hcl
-data "aws_kms_secret" "db" {
- secret {
- name = "master_password"
- payload = "AQECAHgaPa0J8WadplGCqqVAr4HNvDaFSQ+NaiwIBhmm6qDSFwAAAGIwYAYJKoZIhvcNAQcGoFMwUQIBADBMBgkqhkiG9w0BBwEwHgYJYIZIAWUDBAEuMBEEDI+LoLdvYv8l41OhAAIBEIAfx49FFJCLeYrkfMfAw6XlnxP23MmDBdqP8dPp28OoAQ=="
-
- context {
- foo = "bar"
- }
- }
-}
-
-resource "aws_rds_cluster" "rds" {
- master_username = "root"
- master_password = "${data.aws_kms_secret.db.master_password}"
-
- # ...
-}
-```
-
-And your RDS cluster would have the root password set to "master-password"
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `secret` - (Required) One or more encrypted payload definitions from the KMS
- service. See the Secret Definitions below.
-
-
-### Secret Definitions
-
-Each secret definition supports the following arguments:
-
-* `name` - (Required) The name to export this secret under in the attributes.
-* `payload` - (Required) Base64 encoded payload, as returned from a KMS encrypt
- operation.
-* `context` - (Optional) An optional mapping that makes up the Encryption
- Context for the secret.
-* `grant_tokens` (Optional) An optional list of Grant Tokens for the secret.
-
-For more information on `context` and `grant_tokens` see the [KMS
-Concepts](http://docs.aws.amazon.com/kms/latest/developerguide/concepts.html)
-
-## Attributes Reference
-
-Each `secret` defined is exported under its `name` as a top-level attribute.
diff --git a/website/source/docs/providers/aws/d/partition.html.markdown b/website/source/docs/providers/aws/d/partition.html.markdown
deleted file mode 100644
index a6efe5b4b..000000000
--- a/website/source/docs/providers/aws/d/partition.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_partition"
-sidebar_current: "docs-aws-datasource-partition"
-description: |-
- Get AWS partition identifier
----
-
-# aws\_partition
-
-Use this data source to lookup current AWS partition in which Terraform is working
-
-## Example Usage
-
-```hcl
-data "aws_partition" "current" {}
-
-data "aws_iam_policy_document" "s3_policy" {
- statement {
- sid = "1"
-
- actions = [
- "s3:ListBucket",
- ]
-
- resources = [
- "arn:${data.aws_partition.current.partition}:s3:::my-bucket",
- ]
- }
-}
-```
-
-## Argument Reference
-
-There are no arguments available for this data source.
-
-## Attributes Reference
-
-`partition` is set to the identifier of the current partition.
diff --git a/website/source/docs/providers/aws/d/prefix_list.html.markdown b/website/source/docs/providers/aws/d/prefix_list.html.markdown
deleted file mode 100644
index 7dcd62129..000000000
--- a/website/source/docs/providers/aws/d/prefix_list.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_prefix-list"
-sidebar_current: "docs-aws-datasource-prefix-list"
-description: |-
- Provides details about a specific prefix list
----
-
-# aws\_prefix\_list
-
-`aws_prefix_list` provides details about a specific prefix list (PL)
-in the current region.
-
-This can be used both to validate a prefix list given in a variable
-and to obtain the CIDR blocks (IP address ranges) for the associated
-AWS service. The latter may be useful e.g. for adding network ACL
-rules.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc_endpoint" "private_s3" {
- vpc_id = "${aws_vpc.foo.id}"
- service_name = "com.amazonaws.us-west-2.s3"
-}
-
-data "aws_prefix_list" "private_s3" {
- prefix_list_id = "${aws_vpc_endpoint.private_s3.prefix_list_id}"
-}
-
-resource "aws_network_acl" "bar" {
- vpc_id = "${aws_vpc.foo.id}"
-}
-
-resource "aws_network_acl_rule" "private_s3" {
- network_acl_id = "${aws_network_acl.bar.id}"
- rule_number = 200
- egress = false
- protocol = "tcp"
- rule_action = "allow"
- cidr_block = "${data.aws_prefix_list.private_s3.cidr_blocks[0]}"
- from_port = 443
- to_port = 443
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available
-prefix lists. The given filters must match exactly one prefix list
-whose data will be exported as attributes.
-
-* `prefix_list_id` - (Optional) The ID of the prefix list to select.
-
-* `name` - (Optional) The name of the prefix list to select.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the selected prefix list.
-
-* `name` - The name of the selected prefix list.
-
-* `cidr_blocks` - The list of CIDR blocks for the AWS service associated
-with the prefix list.
diff --git a/website/source/docs/providers/aws/d/redshift_service_account.html.markdown b/website/source/docs/providers/aws/d/redshift_service_account.html.markdown
deleted file mode 100644
index 815d676ba..000000000
--- a/website/source/docs/providers/aws/d/redshift_service_account.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_redshift_service_account"
-sidebar_current: "docs-aws-datasource-redshift-service-account"
-description: |-
- Get AWS Redshift Service Account ID for storing audit data in S3.
----
-
-# aws\_redshift\_service\_account
-
-Use this data source to get the Service Account ID of the [AWS Redshift Account](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging)
-in a given region for the purpose of allowing Redshift to store audit data in S3.
-
-## Example Usage
-
-```hcl
-data "aws_redshift_service_account" "main" {}
-
-resource "aws_s3_bucket" "bucket" {
- bucket = "tf-redshift-logging-test-bucket"
- force_destroy = true
-
- policy = < **Note:** The content of an object (`body` field) is available only for objects which have a human-readable `Content-Type` (`text/*` and `application/json`). This is to prevent printing unsafe characters and potentially downloading large amount of data which would be thrown away in favour of metadata.
-
-## Example Usage
-
-```hcl
-data "aws_s3_bucket_object" "lambda" {
- bucket = "my-lambda-functions"
- key = "hello-world.zip"
-}
-
-resource "aws_iam_role" "iam_for_lambda" {
- name = "iam_for_lambda"
-
- assume_role_policy = < **Note:** The [default security group for a VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_SecurityGroups.html#DefaultSecurityGroup) has the name `default`.
diff --git a/website/source/docs/providers/aws/d/sns_topic.html.markdown b/website/source/docs/providers/aws/d/sns_topic.html.markdown
deleted file mode 100644
index edbe0577e..000000000
--- a/website/source/docs/providers/aws/d/sns_topic.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_sns_topic"
-sidebar_current: "docs-aws-datasource-sns-topic"
-description: |-
- Get information on an Amazon Simple Notification Service (SNS) Topic
----
-
-# aws\_sns\_topic
-
-Use this data source to get the ARN of a topic in AWS Simple Notification
-Service (SNS). By using this data source, you can reference SNS topics
-without having to hard code the ARNs as input.
-
-## Example Usage
-
-```hcl
-data "aws_sns_topic" "example" {
- name = "an_example_topic"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The friendly name of the topic to match.
-
-## Attributes Reference
-
-* `arn` - Set to the ARN of the found topic, suitable for referencing in other resources that support SNS topics.
diff --git a/website/source/docs/providers/aws/d/ssm_parameter.html.markdown b/website/source/docs/providers/aws/d/ssm_parameter.html.markdown
deleted file mode 100644
index cd1fc9114..000000000
--- a/website/source/docs/providers/aws/d/ssm_parameter.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ssm_parameter"
-sidebar_current: "docs-aws-datasource-ssm-parameter"
-description: |-
- Provides a SSM Parameter datasource
----
-
-# aws\_ssm\_parameter
-
-Provides an SSM Parameter data source.
-
-## Example Usage
-
-To store a basic string parameter:
-
-```hcl
-data "aws_ssm_parameter" "foo" {
- name = "foo"
-}
-```
-
-~> **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the parameter.
-
-
-The following attributes are exported:
-
-* `name` - (Required) The name of the parameter.
-* `type` - (Required) The type of the parameter. Valid types are `String`, `StringList` and `SecureString`.
-* `value` - (Required) The value of the parameter.
diff --git a/website/source/docs/providers/aws/d/subnet.html.markdown b/website/source/docs/providers/aws/d/subnet.html.markdown
deleted file mode 100644
index ea412400c..000000000
--- a/website/source/docs/providers/aws/d/subnet.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_subnet"
-sidebar_current: "docs-aws-datasource-subnet-x"
-description: |-
- Provides details about a specific VPC subnet
----
-
-# aws\_subnet
-
-`aws_subnet` provides details about a specific VPC subnet.
-
-This resource can prove useful when a module accepts a subnet id as
-an input variable and needs to, for example, determine the id of the
-VPC that the subnet belongs to.
-
-## Example Usage
-
-The following example shows how one might accept a subnet id as a variable
-and use this data source to obtain the data necessary to create a security
-group that allows connections from hosts in that subnet.
-
-```hcl
-variable "subnet_id" {}
-
-data "aws_subnet" "selected" {
- id = "${var.subnet_id}"
-}
-
-resource "aws_security_group" "subnet" {
- vpc_id = "${data.aws_subnet.selected.vpc_id}"
-
- ingress {
- cidr_blocks = ["${data.aws_subnet.selected.cidr_block}"]
- from_port = 80
- to_port = 80
- protocol = "tcp"
- }
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available
-subnets in the current region. The given filters must match exactly one
-subnet whose data will be exported as attributes.
-
-* `availability_zone` - (Optional) The availability zone where the
- subnet must reside.
-
-* `cidr_block` - (Optional) The cidr block of the desired subnet.
-
-* `ipv6_cidr_block` - (Optional) The Ipv6 cidr block of the desired subnet
-
-* `default_for_az` - (Optional) Boolean constraint for whether the desired
- subnet must be the default subnet for its associated availability zone.
-
-* `filter` - (Optional) Custom filter block as described below.
-
-* `id` - (Optional) The id of the specific subnet to retrieve.
-
-* `state` - (Optional) The state that the desired subnet must have.
-
-* `tags` - (Optional) A mapping of tags, each pair of which must exactly match
- a pair on the desired subnet.
-
-* `vpc_id` - (Optional) The id of the VPC that the desired subnet belongs to.
-
-More complex filters can be expressed using one or more `filter` sub-blocks,
-which take the following arguments:
-
-* `name` - (Required) The name of the field to filter by, as defined by
- [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeSubnets.html).
-
-* `values` - (Required) Set of values that are accepted for the given field.
- A subnet will be selected if any one of the given values matches.
-
-## Attributes Reference
-
-All of the argument attributes except `filter` blocks are also exported as
-result attributes. This data source will complete the data by populating
-any fields that are not included in the configuration with the data for
-the selected subnet.
diff --git a/website/source/docs/providers/aws/d/subnet_ids.html.markdown b/website/source/docs/providers/aws/d/subnet_ids.html.markdown
deleted file mode 100644
index 000161258..000000000
--- a/website/source/docs/providers/aws/d/subnet_ids.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_subnet_ids"
-sidebar_current: "docs-aws-datasource-subnet-ids"
-description: |-
- Provides a list of subnet Ids for a VPC
----
-
-# aws\_subnet\_ids
-
-`aws_subnet_ids` provides a list of subnet ids for a given `vpc_id`.
-
-This resource can be useful for getting back a list of subnet ids for a vpc.
-
-## Example Usage
-
-The following shows outputting all cidr blocks for every subnet id in a vpc.
-
-```hcl
-data "aws_subnet_ids" "example" {
- vpc_id = "${var.vpc_id}"
-}
-
-data "aws_subnet" "example" {
- count = "${length(data.aws_subnet_ids.example.ids)}"
- id = "${data.aws_subnet_ids.example.ids[count.index]}"
-}
-
-output "subnet_cidr_blocks" {
- value = ["${data.aws_subnet.example.*.cidr_block}"]
-}
-```
-
-The following example retrieves a list of all subnets in a VPC with a custom
-tag of `Tier` set to a value of "Private" so that the `aws_instance` resource
-can loop through the subnets, putting instances across availability zones.
-
-```hcl
-data "aws_subnet_ids" "private" {
- vpc_id = "${var.vpc_id}"
- tags {
- Tier = "Private"
- }
-}
-
-resource "aws_instance" "app" {
- count = "3"
- ami = "${var.ami}"
- instance_type = "t2.micro"
- subnet_id = "${element(data.aws_subnet_ids.private.ids, count.index)}"
-}
-```
-
-## Argument Reference
-
-* `vpc_id` - (Required) The VPC ID that you want to filter from.
-
-* `tags` - (Optional) A mapping of tags, each pair of which must exactly match
- a pair on the desired subnets.
-
-## Attributes Reference
-
-* `ids` - A list of all the subnet ids found. If none are found, this data source will fail.
diff --git a/website/source/docs/providers/aws/d/vpc.html.markdown b/website/source/docs/providers/aws/d/vpc.html.markdown
deleted file mode 100644
index efc153438..000000000
--- a/website/source/docs/providers/aws/d/vpc.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc"
-sidebar_current: "docs-aws-datasource-vpc-x"
-description: |-
- Provides details about a specific VPC
----
-
-# aws\_vpc
-
-`aws_vpc` provides details about a specific VPC.
-
-This resource can prove useful when a module accepts a vpc id as
-an input variable and needs to, for example, determine the CIDR block of that
-VPC.
-
-## Example Usage
-
-The following example shows how one might accept a VPC id as a variable
-and use this data source to obtain the data necessary to create a subnet
-within it.
-
-```hcl
-variable "vpc_id" {}
-
-data "aws_vpc" "selected" {
- id = "${var.vpc_id}"
-}
-
-resource "aws_subnet" "example" {
- vpc_id = "${data.aws_vpc.selected.id}"
- availability_zone = "us-west-2a"
- cidr_block = "${cidrsubnet(data.aws_vpc.selected.cidr_block, 4, 1)}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available
-VPCs in the current region. The given filters must match exactly one
-VPC whose data will be exported as attributes.
-
-* `cidr_block` - (Optional) The cidr block of the desired VPC.
-
-* `dhcp_options_id` - (Optional) The DHCP options id of the desired VPC.
-
-* `default` - (Optional) Boolean constraint on whether the desired VPC is
- the default VPC for the region.
-
-* `filter` - (Optional) Custom filter block as described below.
-
-* `id` - (Optional) The id of the specific VPC to retrieve.
-
-* `state` - (Optional) The current state of the desired VPC.
- Can be either `"pending"` or `"available"`.
-
-* `tags` - (Optional) A mapping of tags, each pair of which must exactly match
- a pair on the desired VPC.
-
-More complex filters can be expressed using one or more `filter` sub-blocks,
-which take the following arguments:
-
-* `name` - (Required) The name of the field to filter by, as defined by
- [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcs.html).
-
-* `values` - (Required) Set of values that are accepted for the given field.
- A VPC will be selected if any one of the given values matches.
-
-## Attributes Reference
-
-All of the argument attributes except `filter` blocks are also exported as
-result attributes. This data source will complete the data by populating
-any fields that are not included in the configuration with the data for
-the selected VPC.
-
-The following attribute is additionally exported:
-
-* `instance_tenancy` - The allowed tenancy of instances launched into the
- selected VPC. May be any of `"default"`, `"dedicated"`, or `"host"`.
-
-* `ipv6_association_id` - The association ID for the IPv6 CIDR block.
-
-* `ipv6_cidr_block` - The IPv6 CIDR block.
diff --git a/website/source/docs/providers/aws/d/vpc_endpoint.html.markdown b/website/source/docs/providers/aws/d/vpc_endpoint.html.markdown
deleted file mode 100644
index cd944859f..000000000
--- a/website/source/docs/providers/aws/d/vpc_endpoint.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_endpoint"
-sidebar_current: "docs-aws-datasource-vpc-endpoint-x"
-description: |-
- Provides details about a specific VPC endpoint.
----
-
-# aws\_vpc\_endpoint
-
-The VPC Endpoint data source provides details about
-a specific VPC endpoint.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "aws_vpc_endpoint" "s3" {
- vpc_id = "${aws_vpc.foo.id}"
- service_name = "com.amazonaws.us-west-2.s3"
-}
-
-resource "aws_vpc_endpoint_route_table_association" "private_s3" {
- vpc_endpoint_id = "${data.aws_vpc_endpoint.s3.id}"
- route_table_id = "${aws_route_table.private.id}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available VPC endpoints.
-The given filters must match exactly one VPC endpoint whose data will be exported as attributes.
-
-* `id` - (Optional) The ID of the specific VPC Endpoint to retrieve.
-
-* `state` - (Optional) The state of the specific VPC Endpoint to retrieve.
-
-* `vpc_id` - (Optional) The ID of the VPC in which the specific VPC Endpoint is used.
-
-* `service_name` - (Optional) The AWS service name of the specific VPC Endpoint to retrieve.
-
-## Attributes Reference
-
-All of the argument attributes are also exported as result attributes.
-
-* `policy` - The policy document associated with the VPC Endpoint.
-
-* `route_table_ids` - One or more route tables associated with the VPC Endpoint.
diff --git a/website/source/docs/providers/aws/d/vpc_endpoint_service.html.markdown b/website/source/docs/providers/aws/d/vpc_endpoint_service.html.markdown
deleted file mode 100644
index cac062fa7..000000000
--- a/website/source/docs/providers/aws/d/vpc_endpoint_service.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_endpoint_service"
-sidebar_current: "docs-aws-datasource-vpc-endpoint-service"
-description: |-
- Provides details about a specific AWS service that can be specified when creating a VPC endpoint.
----
-
-# aws\_vpc\_endpoint\_service
-
-The VPC Endpoint Service data source allows access to a specific AWS
-service that can be specified when creating a VPC endpoint within the region
-configured in the provider.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "aws_vpc_endpoint_service" "s3" {
- service = "s3"
-}
-
-# Create a VPC
-resource "aws_vpc" "foo" {
- cidr_block = "10.0.0.0/16"
-}
-
-# Create a VPC endpoint
-resource "aws_vpc_endpoint" "ep" {
- vpc_id = "${aws_vpc.foo.id}"
- service_name = "${data.aws_vpc_endpoint_service.s3.service_name}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available VPC endpoint services.
-The given filters must match exactly one VPC endpoint service whose data will be exported as attributes.
-
-* `service` - (Required) The common name of the AWS service (e.g. `s3`).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `service_name` - The service name of the AWS service that can be specified when creating a VPC endpoint.
diff --git a/website/source/docs/providers/aws/d/vpc_peering_connection.html.markdown b/website/source/docs/providers/aws/d/vpc_peering_connection.html.markdown
deleted file mode 100644
index 535828e5f..000000000
--- a/website/source/docs/providers/aws/d/vpc_peering_connection.html.markdown
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_peering_connection"
-sidebar_current: "docs-aws-datasource-vpc-peering-connection"
-description: |-
- Provides details about a specific VPC peering connection.
----
-
-# aws\_vpc\_peering\_connection
-
-The VPC Peering Connection data source provides details about
-a specific VPC peering connection.
-
-## Example Usage
-
-```hcl
-# Declare the data source
-data "aws_vpc_peering_connection" "pc" {
- vpc_id = "${aws_vpc.foo.id}"
- peer_cidr_block = "10.0.1.0/22"
-}
-
-# Create a route table
-resource "aws_route_table" "rt" {
- vpc_id = "${aws_vpc.foo.id}"
-}
-
-# Create a route
-resource "aws_route" "r" {
- route_table_id = "${aws_route_table.rt.id}"
- destination_cidr_block = "${data.aws_vpc_peering_connection.pc.peer_cidr_block}"
- vpc_peering_connection_id = "${data.aws_vpc_peering_connection.pc.id}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available VPC peering connection.
-The given filters must match exactly one VPC peering connection whose data will be exported as attributes.
-
-* `id` - (Optional) The ID of the specific VPC Peering Connection to retrieve.
-
-* `status` - (Optional) The status of the specific VPC Peering Connection to retrieve.
-
-* `vpc_id` - (Optional) The ID of the requester VPC of the specific VPC Peering Connection to retrieve.
-
-* `owner_id` - (Optional) The AWS account ID of the owner of the requester VPC of the specific VPC Peering Connection to retrieve.
-
-* `cidr_block` - (Optional) The CIDR block of the requester VPC of the specific VPC Peering Connection to retrieve.
-
-* `peer_vpc_id` - (Optional) The ID of the accepter VPC of the specific VPC Peering Connection to retrieve.
-
-* `peer_owner_id` - (Optional) The AWS account ID of the owner of the accepter VPC of the specific VPC Peering Connection to retrieve.
-
-* `peer_cidr_block` - (Optional) The CIDR block of the accepter VPC of the specific VPC Peering Connection to retrieve.
-
-* `filter` - (Optional) Custom filter block as described below.
-
-* `tags` - (Optional) A mapping of tags, each pair of which must exactly match
- a pair on the desired VPC Peering Connection.
-
-More complex filters can be expressed using one or more `filter` sub-blocks,
-which take the following arguments:
-
-* `name` - (Required) The name of the field to filter by, as defined by
- [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpcPeeringConnections.html).
-
-* `values` - (Required) Set of values that are accepted for the given field.
- A VPC Peering Connection will be selected if any one of the given values matches.
-
-## Attributes Reference
-
-All of the argument attributes except `filter` are also exported as result attributes.
-
-* `accepter` - A configuration block that describes [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the accepter VPC.
-
-* `requester` - A configuration block that describes [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the requester VPC.
-
-#### Accepter and Requester Attributes Reference
-
-* `allow_remote_vpc_dns_resolution` - Indicates whether a local VPC can resolve public DNS hostnames to
-private IP addresses when queried from instances in a peer VPC.
-
-* `allow_classic_link_to_remote_vpc` - Indicates whether a local ClassicLink connection can communicate
-with the peer VPC over the VPC peering connection.
-
-* `allow_vpc_to_remote_classic_link` - Indicates whether a local VPC can communicate with a ClassicLink
-connection in the peer VPC over the VPC peering connection.
diff --git a/website/source/docs/providers/aws/d/vpn_gateway.html.markdown b/website/source/docs/providers/aws/d/vpn_gateway.html.markdown
deleted file mode 100644
index c20338beb..000000000
--- a/website/source/docs/providers/aws/d/vpn_gateway.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_gateway"
-sidebar_current: "docs-aws-datasource-vpn-gateway"
-description: |-
- Provides details about a specific VPN gateway.
----
-
-# aws\_vpn\_gateway
-
-The VPN Gateway data source provides details about
-a specific VPN gateway.
-
-## Example Usage
-
-```hcl
-data "aws_vpn_gateway" "selected" {
- filter {
- name = "tag:Name"
- values = ["vpn-gw"]
- }
-}
-
-output "vpn_gateway_id" {
- value = "${data.aws_vpn_gateway.selected.id}"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available VPN gateways.
-The given filters must match exactly one VPN gateway whose data will be exported as attributes.
-
-* `id` - (Optional) The ID of the specific VPN Gateway to retrieve.
-
-* `state` - (Optional) The state of the specific VPN Gateway to retrieve.
-
-* `availability_zone` - (Optional) The Availability Zone of the specific VPN Gateway to retrieve.
-
-* `attached_vpc_id` - (Optional) The ID of a VPC attached to the specific VPN Gateway to retrieve.
-
-* `filter` - (Optional) Custom filter block as described below.
-
-* `tags` - (Optional) A mapping of tags, each pair of which must exactly match
- a pair on the desired VPN Gateway.
-
-More complex filters can be expressed using one or more `filter` sub-blocks,
-which take the following arguments:
-
-* `name` - (Required) The name of the field to filter by, as defined by
- [the underlying AWS API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnGateways.html).
-
-* `values` - (Required) Set of values that are accepted for the given field.
- A VPN Gateway will be selected if any one of the given values matches.
-
-## Attributes Reference
-
-All of the argument attributes are also exported as result attributes.
diff --git a/website/source/docs/providers/aws/index.html.markdown b/website/source/docs/providers/aws/index.html.markdown
deleted file mode 100644
index 75104bdc4..000000000
--- a/website/source/docs/providers/aws/index.html.markdown
+++ /dev/null
@@ -1,314 +0,0 @@
----
-layout: "aws"
-page_title: "Provider: AWS"
-sidebar_current: "docs-aws-index"
-description: |-
- The Amazon Web Services (AWS) provider is used to interact with the many resources supported by AWS. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# AWS Provider
-
-The Amazon Web Services (AWS) provider is used to interact with the
-many resources supported by AWS. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the AWS Provider
-provider "aws" {
- access_key = "${var.aws_access_key}"
- secret_key = "${var.aws_secret_key}"
- region = "us-east-1"
-}
-
-# Create a web server
-resource "aws_instance" "web" {
- # ...
-}
-```
-
-## Authentication
-
-The AWS provider offers a flexible means of providing credentials for
-authentication. The following methods are supported, in this order, and
-explained below:
-
-- Static credentials
-- Environment variables
-- Shared credentials file
-- EC2 Role
-
-### Static credentials ###
-
-Static credentials can be provided by adding an `access_key` and `secret_key` in-line in the
-AWS provider block:
-
-Usage:
-
-```hcl
-provider "aws" {
- region = "us-west-2"
- access_key = "anaccesskey"
- secret_key = "asecretkey"
-}
-```
-
-### Environment variables
-
-You can provide your credentials via the `AWS_ACCESS_KEY_ID` and
-`AWS_SECRET_ACCESS_KEY`, environment variables, representing your AWS
-Access Key and AWS Secret Key, respectively. The `AWS_DEFAULT_REGION`
-and `AWS_SESSION_TOKEN` environment variables are also used, if
-applicable:
-
-```hcl
-provider "aws" {}
-```
-
-Usage:
-
-```hcl
-$ export AWS_ACCESS_KEY_ID="anaccesskey"
-$ export AWS_SECRET_ACCESS_KEY="asecretkey"
-$ export AWS_DEFAULT_REGION="us-west-2"
-$ terraform plan
-```
-
-### Shared Credentials file
-
-You can use an AWS credentials file to specify your credentials. The
-default location is `$HOME/.aws/credentials` on Linux and OS X, or
-`"%USERPROFILE%\.aws\credentials"` for Windows users. If we fail to
-detect credentials inline, or in the environment, Terraform will check
-this location. You can optionally specify a different location in the
-configuration by providing the `shared_credentials_file` attribute, or
-in the environment with the `AWS_SHARED_CREDENTIALS_FILE` variable. This
-method also supports a `profile` configuration and matching
-`AWS_PROFILE` environment variable:
-
-Usage:
-
-```hcl
-provider "aws" {
- region = "us-west-2"
- shared_credentials_file = "/Users/tf_user/.aws/creds"
- profile = "customprofile"
-}
-```
-
-### EC2 Role
-
-If you're running Terraform from an EC2 instance with IAM Instance Profile
-using IAM Role, Terraform will just ask
-[the metadata API](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-roles-for-amazon-ec2.html#instance-metadata-security-credentials)
-endpoint for credentials.
-
-This is a preferred approach over any other when running in EC2 as you can avoid
-hard coding credentials. Instead these are leased on-the-fly by Terraform
-which reduces the chance of leakage.
-
-You can provide the custom metadata API endpoint via the `AWS_METADATA_ENDPOINT` variable
-which expects the endpoint URL, including the version, and defaults to `http://169.254.169.254:80/latest`.
-
-### Assume role
-
-If provided with a role ARN, Terraform will attempt to assume this role
-using the supplied credentials.
-
-Usage:
-
-```hcl
-provider "aws" {
- assume_role {
- role_arn = "arn:aws:iam::ACCOUNT_ID:role/ROLE_NAME"
- session_name = "SESSION_NAME"
- external_id = "EXTERNAL_ID"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `access_key` - (Optional) This is the AWS access key. It must be provided, but
- it can also be sourced from the `AWS_ACCESS_KEY_ID` environment variable, or via
- a shared credentials file if `profile` is specified.
-
-* `secret_key` - (Optional) This is the AWS secret key. It must be provided, but
- it can also be sourced from the `AWS_SECRET_ACCESS_KEY` environment variable, or
- via a shared credentials file if `profile` is specified.
-
-* `region` - (Required) This is the AWS region. It must be provided, but
- it can also be sourced from the `AWS_DEFAULT_REGION` environment variables, or
- via a shared credentials file if `profile` is specified.
-
-* `profile` - (Optional) This is the AWS profile name as set in the shared credentials
- file.
-
-* `assume_role` - (Optional) An `assume_role` block (documented below). Only one
- `assume_role` block may be in the configuration.
-
-* `shared_credentials_file` - (Optional) This is the path to the shared credentials file.
- If this is not set and a profile is specified, `~/.aws/credentials` will be used.
-
-* `token` - (Optional) Use this to set an MFA token. It can also be sourced
- from the `AWS_SESSION_TOKEN` environment variable.
-
-* `max_retries` - (Optional) This is the maximum number of times an API
- call is retried, in the case where requests are being throttled or
- experiencing transient failures. The delay between the subsequent API
- calls increases exponentially.
-
-* `allowed_account_ids` - (Optional) List of allowed, white listed, AWS
- account IDs to prevent you from mistakenly using an incorrect one (and
- potentially end up destroying a live environment). Conflicts with
- `forbidden_account_ids`.
-
-* `forbidden_account_ids` - (Optional) List of forbidden, blacklisted,
- AWS account IDs to prevent you mistakenly using a wrong one (and
- potentially end up destroying a live environment). Conflicts with
- `allowed_account_ids`.
-
-* `insecure` - (Optional) Explicitly allow the provider to
- perform "insecure" SSL requests. If omitted, default value is `false`.
-
-* `skip_credentials_validation` - (Optional) Skip the credentials
- validation via the STS API. Useful for AWS API implementations that do
- not have STS available or implemented.
-
-* `skip_get_ec2_platforms` - (Optional) Skip getting the supported EC2
- platforms. Used by users that don't have ec2:DescribeAccountAttributes
- permissions.
-
-* `skip_region_validation` - (Optional) Skip validation of provided region name.
- Useful for AWS-like implementations that use their own region names
- or to bypass the validation for regions that aren't publicly available yet.
-
-* `skip_requesting_account_id` - (Optional) Skip requesting the account
- ID. Useful for AWS API implementations that do not have the IAM, STS
- API, or metadata API. When set to `true`, prevents you from managing
- any resource that requires Account ID to construct an ARN, e.g.
- - `aws_db_instance`
- - `aws_db_option_group`
- - `aws_db_parameter_group`
- - `aws_db_security_group`
- - `aws_db_subnet_group`
- - `aws_elasticache_cluster`
- - `aws_glacier_vault`
- - `aws_rds_cluster`
- - `aws_rds_cluster_instance`
- - `aws_rds_cluster_parameter_group`
- - `aws_redshift_cluster`
-
-* `skip_metadata_api_check` - (Optional) Skip the AWS Metadata API
- check. Useful for AWS API implementations that do not have a metadata
- API endpoint. Setting to `true` prevents Terraform from authenticating
- via the Metadata API. You may need to use other authentication methods
- like static credentials, configuration variables, or environment
- variables.
-
-* `s3_force_path_style` - (Optional) Set this to `true` to force the
- request to use path-style addressing, i.e.,
- `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will use
- virtual hosted bucket addressing, `http://BUCKET.s3.amazonaws.com/KEY`,
- when possible. Specific to the Amazon S3 service.
-
-The nested `assume_role` block supports the following:
-
-* `role_arn` - (Required) The ARN of the role to assume.
-
-* `session_name` - (Optional) The session name to use when making the
- AssumeRole call.
-
-* `external_id` - (Optional) The external ID to use when making the
- AssumeRole call.
-
-* `policy` - (Optional) A more restrictive policy to apply to the temporary credentials.
-This gives you a way to further restrict the permissions for the resulting temporary
-security credentials. You cannot use the passed policy to grant permissions that are
-in excess of those allowed by the access policy of the role that is being assumed.
-
-Nested `endpoints` block supports the following:
-
-* `cloudwatch` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom CloudWatch endpoints.
-
-* `cloudwatchevents` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom CloudWatchEvents endpoints.
-
-* `cloudwatchlogs` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom CloudWatchLogs endpoints.
-
-* `cloudformation` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom CloudFormation endpoints.
-
-* `dynamodb` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- `dynamodb-local`.
-
-* `kinesis` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- `kinesalite`.
-
-* `kms` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom KMS endpoints.
-
-* `iam` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom IAM endpoints.
-
-* `ec2` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom EC2 endpoints.
-
-* `elb` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom ELB endpoints.
-
-* `rds` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom RDS endpoints.
-
-* `s3` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom S3 endpoints.
-
-* `sns` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom SNS endpoints.
-
-* `sqs` - (Optional) Use this to override the default endpoint
- URL constructed from the `region`. It's typically used to connect to
- custom SQS endpoints.
-
-## Getting the Account ID
-
-If you use either `allowed_account_ids` or `forbidden_account_ids`,
-Terraform uses several approaches to get the actual account ID
-in order to compare it with allowed or forbidden IDs.
-
-Approaches differ per authentication providers:
-
- * EC2 instance w/ IAM Instance Profile - [Metadata API](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html)
- is always used. Introduced in Terraform `0.6.16`.
- * All other providers (environment variable, shared credentials file, ...)
- will try two approaches in the following order
- * `iam:GetUser` - Typically useful for IAM Users. It also means
- that each user needs to be privileged to call `iam:GetUser` for themselves.
- * `sts:GetCallerIdentity` - _Should_ work for both IAM Users and federated IAM Roles,
- introduced in Terraform `0.6.16`.
- * `iam:ListRoles` - This is specifically useful for IdP-federated profiles
- which cannot use `iam:GetUser`. It also means that each federated user
- need to be _assuming_ an IAM role which allows `iam:ListRoles`.
- Used in Terraform `0.6.16+`.
- There used to be no better way to get account ID out of the API
- when using federated account until `sts:GetCallerIdentity` was introduced.
diff --git a/website/source/docs/providers/aws/r/alb.html.markdown b/website/source/docs/providers/aws/r/alb.html.markdown
deleted file mode 100644
index 491a59cb6..000000000
--- a/website/source/docs/providers/aws/r/alb.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb"
-sidebar_current: "docs-aws-resource-alb"
-description: |-
- Provides an Application Load Balancer resource.
----
-
-# aws\_alb
-
-Provides an Application Load Balancer resource.
-
-The official AWS CLI calls this "elbv2" while their documentation calls it
-an Application Load Balancer. Terraform uses "ALB" but they mean the same
-thing.
-
-## Example Usage
-
-```hcl
-# Create a new load balancer
-resource "aws_alb" "test" {
- name = "test-alb-tf"
- internal = false
- security_groups = ["${aws_security_group.alb_sg.id}"]
- subnets = ["${aws_subnet.public.*.id}"]
-
- enable_deletion_protection = true
-
- access_logs {
- bucket = "${aws_s3_bucket.alb_logs.bucket}"
- prefix = "test-alb"
- }
-
- tags {
- Environment = "production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the ALB. This name must be unique within your AWS account, can have a maximum of 32 characters,
-must contain only alphanumeric characters or hyphens, and must not begin or end with a hyphen. If not specified,
-Terraform will autogenerate a name beginning with `tf-lb`.
-* `name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `internal` - (Optional) If true, the ALB will be internal.
-* `security_groups` - (Optional) A list of security group IDs to assign to the ALB.
-* `access_logs` - (Optional) An Access Logs block. Access Logs documented below.
-* `subnets` - (Required) A list of subnet IDs to attach to the ALB.
-* `idle_timeout` - (Optional) The time in seconds that the connection is allowed to be idle. Default: 60.
-* `enable_deletion_protection` - (Optional) If true, deletion of the load balancer will be disabled via
- the AWS API. This will prevent Terraform from deleting the load balancer. Defaults to `false`.
-* `ip_address_type` - (Optional) The type of IP addresses used by the subnets for your load balancer. The possible values are `ipv4` and `dualstack`
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-~> **NOTE:** Please note that internal ALBs can only use `ipv4` as the ip_address_type. You can only change to `dualstack` ip_address_type if the selected subnets are IPv6 enabled.
-
-Access Logs (`access_logs`) support the following:
-
-* `bucket` - (Required) The S3 bucket name to store the logs in.
-* `prefix` - (Optional) The S3 bucket prefix. Logs are stored in the root if not configured.
-* `enabled` - (Optional) Boolean to enable / disable `access_logs`. Default is `true`
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The ARN of the load balancer (matches `arn`).
-* `arn` - The ARN of the load balancer (matches `id`).
-* `arn_suffix` - The ARN suffix for use with CloudWatch Metrics.
-* `dns_name` - The DNS name of the load balancer.
-* `canonical_hosted_zone_id` - The canonical hosted zone ID of the load balancer.
-* `zone_id` - The canonical hosted zone ID of the load balancer (to be used in a Route 53 Alias record).
-
-## Timeouts
-
-`aws_alb` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `create` - (Default `10 minutes`) Used for Creating ALB
-- `update` - (Default `10 minutes`) Used for ALB modifications
-- `delete` - (Default `10 minutes`) Used for destroying ALB
-
-## Import
-
-ALBs can be imported using their ARN, e.g.
-
-```
-$ terraform import aws_alb.bar arn:aws:elasticloadbalancing:us-west-2:123456789012:loadbalancer/app/my-load-balancer/50dc6c495c0c9188
-```
diff --git a/website/source/docs/providers/aws/r/alb_listener.html.markdown b/website/source/docs/providers/aws/r/alb_listener.html.markdown
deleted file mode 100644
index 53e75d795..000000000
--- a/website/source/docs/providers/aws/r/alb_listener.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb_listener"
-sidebar_current: "docs-aws-resource-alb-listener"
-description: |-
- Provides an Application Load Balancer Listener resource.
----
-
-# aws\_alb\_listener
-
-Provides an Application Load Balancer Listener resource.
-
-## Example Usage
-
-```hcl
-# Create a new load balancer
-resource "aws_alb" "front_end" {
- # ...
-}
-
-resource "aws_alb_target_group" "front_end" {
- # ...
-}
-
-resource "aws_alb_listener" "front_end" {
- load_balancer_arn = "${aws_alb.front_end.arn}"
- port = "443"
- protocol = "HTTPS"
- ssl_policy = "ELBSecurityPolicy-2015-05"
- certificate_arn = "arn:aws:iam::187416307283:server-certificate/test_cert_rab3wuqwgja25ct3n4jdj2tzu4"
-
- default_action {
- target_group_arn = "${aws_alb_target_group.front_end.arn}"
- type = "forward"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `load_balancer_arn` - (Required, Forces New Resource) The ARN of the load balancer.
-* `port` - (Required) The port on which the load balancer is listening.
-* `protocol` - (Optional) The protocol for connections from clients to the load balancer. Valid values are `HTTP` and `HTTPS`. Defaults to `HTTP`.
-* `ssl_policy` - (Optional) The name of the SSL Policy for the listener. Required if `protocol` is `HTTPS`.
-* `certificate_arn` - (Optional) The ARN of the SSL server certificate. Exactly one certificate is required if the protocol is HTTPS.
-* `default_action` - (Required) An Action block. Action blocks are documented below.
-
-Action Blocks (for `default_action`) support the following:
-
-* `target_group_arn` - (Required) The ARN of the Target Group to which to route traffic.
-* `type` - (Required) The type of routing action. The only valid value is `forward`.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The ARN of the listener (matches `arn`)
-* `arn` - The ARN of the listener (matches `id`)
-
-## Import
-
-Listeners can be imported using their ARN, e.g.
-
-```
-$ terraform import aws_alb_listener.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener/app/front-end-alb/8e4497da625e2d8a/9ab28ade35828f96
-```
diff --git a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown b/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown
deleted file mode 100644
index 6bfe263c7..000000000
--- a/website/source/docs/providers/aws/r/alb_listener_rule.html.markdown
+++ /dev/null
@@ -1,89 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb_listener_rule"
-sidebar_current: "docs-aws-resource-alb-listener-rule"
-description: |-
- Provides an Application Load Balancer Listener Rule resource.
----
-
-# aws\_alb\_listener\_rule
-
-Provides an Application Load Balancer Listener Rule resource.
-
-## Example Usage
-
-```hcl
-# Create a new load balancer
-resource "aws_alb" "front_end" {
- # ...
-}
-
-resource "aws_alb_listener" "front_end" {
- # Other parameters
-}
-
-resource "aws_alb_listener_rule" "static" {
- listener_arn = "${aws_alb_listener.front_end.arn}"
- priority = 100
-
- action {
- type = "forward"
- target_group_arn = "${aws_alb_target_group.static.arn}"
- }
-
- condition {
- field = "path-pattern"
- values = ["/static/*"]
- }
-}
-
-resource "aws_alb_listener_rule" "host_based_routing" {
- listener_arn = "${aws_alb_listener.front_end.arn}"
- priority = 99
-
- action {
- type = "forward"
- target_group_arn = "${aws_alb_target_group.static.arn}"
- }
-
- condition {
- field = "host-header"
- values = ["my-service.*.terraform.io"]
- }
-}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `listener_arn` - (Required, Forces New Resource) The ARN of the listener to which to attach the rule.
-* `priority` - (Required) The priority for the rule. A listener can't have multiple rules with the same priority.
-* `action` - (Required) An Action block. Action blocks are documented below.
-* `condition` - (Required) A Condition block. Condition blocks are documented below.
-
-Action Blocks (for `action`) support the following:
-
-* `target_group_arn` - (Required) The ARN of the Target Group to which to route traffic.
-* `type` - (Required) The type of routing action. The only valid value is `forward`.
-
-Condition Blocks (for `condition`) support the following:
-
-* `field` - (Required) The name of the field. Must be one of `path-pattern` for path based routing or `host-header` for host based routing.
-* `values` - (Required) The path patterns to match. A maximum of 1 can be defined.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The ARN of the rule (matches `arn`)
-* `arn` - The ARN of the rule (matches `id`)
-
-## Import
-
-Rules can be imported using their ARN, e.g.
-
-```
-$ terraform import aws_alb_listener_rule.front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:listener-rule/app/test/8e4497da625e2d8a/9ab28ade35828f96/67b3d2d36dd7c26b
-```
diff --git a/website/source/docs/providers/aws/r/alb_target_group.html.markdown b/website/source/docs/providers/aws/r/alb_target_group.html.markdown
deleted file mode 100644
index c554a06d5..000000000
--- a/website/source/docs/providers/aws/r/alb_target_group.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb_target_group"
-sidebar_current: "docs-aws-resource-alb-target-group"
-description: |-
- Provides a Target Group resource for use with Application Load
- Balancers.
----
-
-# aws\_alb\_target\_group
-
-Provides a Target Group resource for use with Application Load Balancer
-resources.
-
-## Example Usage
-
-```hcl
-resource "aws_alb_target_group" "test" {
- name = "tf-example-alb-tg"
- port = 80
- protocol = "HTTP"
- vpc_id = "${aws_vpc.main.id}"
-}
-
-resource "aws_vpc" "main" {
- cidr_block = "10.0.0.0/16"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the target group. If omitted, Terraform will assign a random, unique name.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `port` - (Required) The port on which targets receive traffic, unless overridden when registering a specific target.
-* `protocol` - (Required) The protocol to use for routing traffic to the targets.
-* `vpc_id` - (Required) The identifier of the VPC in which to create the target group.
-* `deregistration_delay` - (Optional) The amount of time for Elastic Load Balancing to wait before changing the state of a deregistering target from draining to unused. The range is 0-3600 seconds. The default value is 300 seconds.
-* `stickiness` - (Optional) A Stickiness block. Stickiness blocks are documented below.
-* `health_check` - (Optional) A Health Check block. Health Check blocks are documented below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Stickiness Blocks (`stickiness`) support the following:
-
-* `type` - (Required) The type of sticky sessions. The only current possible value is `lb_cookie`.
-* `cookie_duration` - (Optional) The time period, in seconds, during which requests from a client should be routed to the same target. After this time period expires, the load balancer-generated cookie is considered stale. The range is 1 second to 1 week (604800 seconds). The default value is 1 day (86400 seconds).
-* `enabled` - (Optional) Boolean to enable / disable `stickiness`. Default is `true`
-
-Health Check Blocks (`health_check`) support the following:
-
-* `interval` - (Optional) The approximate amount of time, in seconds, between health checks of an individual target. Minimum value 5 seconds, Maximum value 300 seconds. Default 30 seconds.
-* `path` - (Optional) The destination for the health check request. Default `/`.
-* `port` - (Optional) The port to use to connect with the target. Valid values are either ports 1-65535, or `traffic-port`. Defaults to `traffic-port`.
-* `protocol` - (Optional) The protocol to use to connect with the target. Defaults to `HTTP`.
-* `timeout` - (Optional) The amount of time, in seconds, during which no response means a failed health check. Defaults to 5 seconds.
-* `healthy_threshold` - (Optional) The number of consecutive health check successes required before considering an unhealthy target healthy. Defaults to 5.
-* `unhealthy_threshold` - (Optional) The number of consecutive health check failures required before considering the target unhealthy. Defaults to 2.
-* `matcher` - (Optional) The HTTP codes to use when checking for a successful response from a target. Defaults to `200`. You can specify multiple values (for example, "200,202") or a range of values (for example, "200-299").
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The ARN of the Target Group (matches `arn`)
-* `arn` - The ARN of the Target Group (matches `id`)
-* `arn_suffix` - The ARN suffix for use with CloudWatch Metrics.
-
-## Import
-
-Target Groups can be imported using their ARN, e.g.
-
-```
-$ terraform import aws_alb_target_group.app_front_end arn:aws:elasticloadbalancing:us-west-2:187416307283:targetgroup/app-front-end/20cfe21448b66314
-```
diff --git a/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown b/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown
deleted file mode 100644
index a0cac7241..000000000
--- a/website/source/docs/providers/aws/r/alb_target_group_attachment.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_alb_target_group_attachment"
-sidebar_current: "docs-aws-resource-alb-target-group-attachment"
-description: |-
- Provides the ability to register instances and containers with an ALB
- target group
----
-
-# aws\_alb\_target\_group\_attachment
-
-Provides the ability to register instances and containers with an ALB
-target group
-
-## Example Usage
-
-```hcl
-resource "aws_alb_target_group_attachment" "test" {
- target_group_arn = "${aws_alb_target_group.test.arn}"
- target_id = "${aws_instance.test.id}"
- port = 80
-}
-
-resource "aws_alb_target_group" "test" {
- // Other arguments
-}
-
-resource "aws_instance" "test" {
- // Other arguments
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `target_group_arn` - (Required) The ARN of the target group with which to register targets
-* `target_id` (Required) The ID of the target. This is the Instance ID for an instance, or the container ID for an ECS container.
-* `port` - (Optional) The port on which targets receive traffic.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - A unique identifier for the attachment
-
-## Import
-
-Target Group Attachments cannot be imported.
-
diff --git a/website/source/docs/providers/aws/r/ami.html.markdown b/website/source/docs/providers/aws/r/ami.html.markdown
deleted file mode 100644
index 7c1a64298..000000000
--- a/website/source/docs/providers/aws/r/ami.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami"
-sidebar_current: "docs-aws-resource-ami"
-description: |-
- Creates and manages a custom Amazon Machine Image (AMI).
----
-
-# aws\_ami
-
-The AMI resource allows the creation and management of a completely-custom
-*Amazon Machine Image* (AMI).
-
-If you just want to duplicate an existing AMI, possibly copying it to another
-region, it's better to use `aws_ami_copy` instead.
-
-If you just want to share an existing AMI with another AWS account,
-it's better to use `aws_ami_launch_permission` instead.
-
-## Example Usage
-
-```hcl
-# Create an AMI that will start a machine whose root device is backed by
-# an EBS volume populated from a snapshot. It is assumed that such a snapshot
-# already exists with the id "snap-xxxxxxxx".
-resource "aws_ami" "example" {
- name = "terraform-example"
- virtualization_type = "hvm"
- root_device_name = "/dev/xvda"
-
- ebs_block_device {
- device_name = "/dev/xvda"
- snapshot_id = "snap-xxxxxxxx"
- volume_size = 8
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A region-unique name for the AMI.
-* `description` - (Optional) A longer, human-readable description for the AMI.
-* `virtualization_type` - (Optional) Keyword to choose what virtualization mode created instances
- will use. Can be either "paravirtual" (the default) or "hvm". The choice of virtualization type
- changes the set of further arguments that are required, as described below.
-* `architecture` - (Optional) Machine architecture for created instances. Defaults to "x86_64".
-* `ebs_block_device` - (Optional) Nested block describing an EBS block device that should be
- attached to created instances. The structure of this block is described below.
-* `ephemeral_block_device` - (Optional) Nested block describing an ephemeral block device that
- should be attached to created instances. The structure of this block is described below.
-
-When `virtualization_type` is "paravirtual" the following additional arguments apply:
-
-* `image_location` - (Required) Path to an S3 object containing an image manifest, e.g. created
- by the `ec2-upload-bundle` command in the EC2 command line tools.
-* `kernel_id` - (Required) The id of the kernel image (AKI) that will be used as the paravirtual
- kernel in created instances.
-* `ramdisk_id` - (Optional) The id of an initrd image (ARI) that will be used when booting the
- created instances.
-
-When `virtualization_type` is "hvm" the following additional arguments apply:
-
-* `sriov_net_support` - (Optional) When set to "simple" (the default), enables enhanced networking
- for created instances. No other value is supported at this time.
-
-Nested `ebs_block_device` blocks have the following structure:
-
-* `device_name` - (Required) The path at which the device is exposed to created instances.
-* `delete_on_termination` - (Optional) Boolean controlling whether the EBS volumes created to
- support each created instance will be deleted once that instance is terminated.
-* `encrypted` - (Optional) Boolean controlling whether the created EBS volumes will be encrypted.
-* `iops` - (Required only when `volume_type` is "io1") Number of I/O operations per second the
- created volumes will support.
-* `snapshot_id` - (Optional) The id of an EBS snapshot that will be used to initialize the created
- EBS volumes. If set, the `volume_size` attribute must be at least as large as the referenced
- snapshot.
-* `volume_size` - (Required unless `snapshot_id` is set) The size of created volumes in GiB.
- If `snapshot_id` is set and `volume_size` is omitted then the volume will have the same size
- as the selected snapshot.
-* `volume_type` - (Optional) The type of EBS volume to create. Can be one of "standard" (the
- default), "io1" or "gp2".
-* `kms_key_id` - (Optional) The full ARN of the AWS Key Management Service (AWS KMS) CMK to use when encrypting the snapshots of
-an image during a copy operation. This parameter is only required if you want to use a non-default CMK;
-if this parameter is not specified, the default CMK for EBS is used
-
-~> **Note:** You can specify `encrypted` or `snapshot_id` but not both.
-
-Nested `ephemeral_block_device` blocks have the following structure:
-
-* `device_name` - (Required) The path at which the device is exposed to created instances.
-* `virtual_name` - (Required) A name for the ephemeral device, of the form "ephemeralN" where
- *N* is a volume number starting from zero.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the created AMI.
diff --git a/website/source/docs/providers/aws/r/ami_copy.html.markdown b/website/source/docs/providers/aws/r/ami_copy.html.markdown
deleted file mode 100644
index 67db233f0..000000000
--- a/website/source/docs/providers/aws/r/ami_copy.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami_copy"
-sidebar_current: "docs-aws-resource-ami-copy"
-description: |-
- Duplicates an existing Amazon Machine Image (AMI)
----
-
-# aws\_ami\_copy
-
-The "AMI copy" resource allows duplication of an Amazon Machine Image (AMI),
-including cross-region copies.
-
-If the source AMI has associated EBS snapshots, those will also be duplicated
-along with the AMI.
-
-This is useful for taking a single AMI provisioned in one region and making
-it available in another for a multi-region deployment.
-
-Copying an AMI can take several minutes. The creation of this resource will
-block until the new AMI is available for use on new instances.
-
-## Example Usage
-
-```hcl
-resource "aws_ami_copy" "example" {
- name = "terraform-example"
- description = "A copy of ami-xxxxxxxx"
- source_ami_id = "ami-xxxxxxxx"
- source_ami_region = "us-west-1"
-
- tags {
- Name = "HelloWorld"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A region-unique name for the AMI.
-* `source_ami_id` - (Required) The id of the AMI to copy. This id must be valid in the region
- given by `source_ami_region`.
-* `source_ami_region` - (Required) The region from which the AMI will be copied. This may be the
- same as the AWS provider region in order to create a copy within the same region.
-* `encrypted` - (Optional) Specifies whether the destination snapshots of the copied image should be encrypted. Defaults to `false`
-* `kms_key_id` - (Optional) The full ARN of the KMS Key to use when encrypting the snapshots of an image during a copy operation. If not specified, then the default AWS KMS Key will be used
-
-This resource also exposes the full set of arguments from the [`aws_ami`](ami.html) resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the created AMI.
-
-This resource also exports a full set of attributes corresponding to the arguments of the
-[`aws_ami`](ami.html) resource, allowing the properties of the created AMI to be used elsewhere in the
-configuration.
diff --git a/website/source/docs/providers/aws/r/ami_from_instance.html.markdown b/website/source/docs/providers/aws/r/ami_from_instance.html.markdown
deleted file mode 100644
index 806cc3b3e..000000000
--- a/website/source/docs/providers/aws/r/ami_from_instance.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami_from_instance"
-sidebar_current: "docs-aws-resource-ami-from-instance"
-description: |-
- Creates an Amazon Machine Image (AMI) from an EBS-backed EC2 instance
----
-
-# aws\_ami\_from\_instance
-
-The "AMI from instance" resource allows the creation of an Amazon Machine
-Image (AMI) modelled after an existing EBS-backed EC2 instance.
-
-The created AMI will refer to implicitly-created snapshots of the instance's
-EBS volumes and mimic its assigned block device configuration at the time
-the resource is created.
-
-This resource is best applied to an instance that is stopped when this instance
-is created, so that the contents of the created image are predictable. When
-applied to an instance that is running, *the instance will be stopped before taking
-the snapshots and then started back up again*, resulting in a period of
-downtime.
-
-Note that the source instance is inspected only at the initial creation of this
-resource. Ongoing updates to the referenced instance will not be propagated into
-the generated AMI. Users may taint or otherwise recreate the resource in order
-to produce a fresh snapshot.
-
-## Example Usage
-
-```hcl
-resource "aws_ami_from_instance" "example" {
- name = "terraform-example"
- source_instance_id = "i-xxxxxxxx"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A region-unique name for the AMI.
-* `source_instance_id` - (Required) The id of the instance to use as the basis of the AMI.
-* `snapshot_without_reboot` - (Optional) Boolean that overrides the behavior of stopping
- the instance before snapshotting. This is risky since it may cause a snapshot of an
- inconsistent filesystem state, but can be used to avoid downtime if the user otherwise
- guarantees that no filesystem writes will be underway at the time of snapshot.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the created AMI.
-
-This resource also exports a full set of attributes corresponding to the arguments of the
-`aws_ami` resource, allowing the properties of the created AMI to be used elsewhere in the
-configuration.
diff --git a/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown b/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown
deleted file mode 100644
index 838d4fb17..000000000
--- a/website/source/docs/providers/aws/r/ami_launch_permission.html.markdown
+++ /dev/null
@@ -1,33 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ami_launch_permission"
-sidebar_current: "docs-aws-resource-ami-launch-permission"
-description: |-
- Adds launch permission to Amazon Machine Image (AMI).
----
-
-# aws\_ami\_launch\_permission
-
-Adds launch permission to Amazon Machine Image (AMI) from another AWS account.
-
-## Example Usage
-
-```hcl
-resource "aws_ami_launch_permission" "example" {
- image_id = "ami-12345678"
- account_id = "123456789012"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `image_id` - (Required) The ID of the AMI to which the launch permission is added.
- * `account_id` - (Required) The AWS account ID to which launch permissions are granted.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - A combination of "`image_id`-`account_id`".
diff --git a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown b/website/source/docs/providers/aws/r/api_gateway_account.html.markdown
deleted file mode 100644
index 3a6bf993b..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_account.html.markdown
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_account"
-sidebar_current: "docs-aws-resource-api-gateway-account"
-description: |-
- Provides settings for an API Gateway Account.
----
-
-# aws\_api\_gateway\_account
-
-Provides settings for an API Gateway Account. Settings are applied region-wide per `provider` block.
-
--> **Note:** As there is no API method for deleting account settings or resetting it to defaults, destroying this resource will keep your account settings intact
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_account" "demo" {
- cloudwatch_role_arn = "${aws_iam_role.cloudwatch.arn}"
-}
-
-resource "aws_iam_role" "cloudwatch" {
- name = "api_gateway_cloudwatch_global"
-
- assume_role_policy = < **Warning:** Since the API Gateway usage plans feature was launched on August 11, 2016, usage plans are now **required** to associate an API key with an API stage.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
-}
-
-resource "aws_api_gateway_api_key" "MyDemoApiKey" {
- name = "demo"
-
- stage_key {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- stage_name = "${aws_api_gateway_deployment.MyDemoDeployment.stage_name}"
- }
-}
-
-resource "aws_api_gateway_deployment" "MyDemoDeployment" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- stage_name = "test"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the API key
-* `description` - (Optional) The API key description. Defaults to "Managed by Terraform".
-* `enabled` - (Optional) Specifies whether the API key can be used by callers. Defaults to `true`.
-* `value` - (Optional) The value of the API key. If not specified, it will be automatically generated by AWS on creation.
-* `stage_key` - (Optional) A list of stage keys associated with the API key - see below
-
-`stage_key` block supports the following:
-
-* `rest_api_id` - (Required) The ID of the associated REST API.
-* `stage_name` - (Required) The name of the API Gateway stage.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the API key
-* `created_date` - The creation date of the API key
-* `last_updated_date` - The last update date of the API key
-* `value` - The value of the API key
-
-
-## Import
-
-API Gateway Keys can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_api_gateway_api_key.my_demo_key 8bklk8bl1k3sB38D9B3l0enyWT8c09B30lkq0blk
-```
diff --git a/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown b/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown
deleted file mode 100644
index 366bb6202..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_authorizer.html.markdown
+++ /dev/null
@@ -1,115 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_authorizer"
-sidebar_current: "docs-aws-resource-api-gateway-authorizer"
-description: |-
- Provides an API Gateway Authorizer.
----
-
-# aws\_api\_gateway\_authorizer
-
-Provides an API Gateway Authorizer.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_authorizer" "demo" {
- name = "demo"
- rest_api_id = "${aws_api_gateway_rest_api.demo.id}"
- authorizer_uri = "arn:aws:apigateway:region:lambda:path/2015-03-31/functions/${aws_lambda_function.authorizer.arn}/invocations"
- authorizer_credentials = "${aws_iam_role.invocation_role.arn}"
-}
-
-resource "aws_api_gateway_rest_api" "demo" {
- name = "auth-demo"
-}
-
-resource "aws_iam_role" "invocation_role" {
- name = "api_gateway_auth_invocation"
- path = "/"
-
- assume_role_policy = < **Note:** Depends on having `aws_api_gateway_integration` inside your rest api (which in turn depends on `aws_api_gateway_method`). To avoid race conditions
-you might need to add an explicit `depends_on = ["aws_api_gateway_integration.name"]`.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_resource" "MyDemoResource" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}"
- path_part = "test"
-}
-
-resource "aws_api_gateway_method" "MyDemoMethod" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-
-resource "aws_api_gateway_integration" "MyDemoIntegration" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- type = "MOCK"
-}
-
-resource "aws_api_gateway_deployment" "MyDemoDeployment" {
- depends_on = ["aws_api_gateway_method.MyDemoMethod"]
-
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- stage_name = "test"
-
- variables = {
- "answer" = "42"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rest_api_id` - (Required) The ID of the associated REST API
-* `stage_name` - (Required) The name of the stage
-* `description` - (Optional) The description of the deployment
-* `stage_description` - (Optional) The description of the stage
-* `variables` - (Optional) A map that defines variables for the stage
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the deployment
-* `invoke_url` - The URL to invoke the API pointing to the stage,
- e.g. `https://z4675bid1j.execute-api.eu-west-2.amazonaws.com/prod`
-* `execution_arn` - The execution ARN to be used in [`lambda_permission`](/docs/providers/aws/r/lambda_permission.html)'s `source_arn`
- when allowing API Gateway to invoke a Lambda function,
- e.g. `arn:aws:execute-api:eu-west-2:123456789012:z4675bid1j/prod`
-* `created_date` - The creation date of the deployment
diff --git a/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown b/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown
deleted file mode 100644
index 10da7b09a..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_domain_name.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_domain_name"
-sidebar_current: "docs-aws-resource-api-gateway-domain-name"
-description: |-
- Registers a custom domain name for use with AWS API Gateway.
----
-
-# aws\_api\_gateway\_domain\_name
-
-Registers a custom domain name for use with AWS API Gateway.
-
-This resource just establishes ownership of and the TLS settings for
-a particular domain name. An API can be attached to a particular path
-under the registered domain name using
-[the `aws_api_gateway_base_path_mapping` resource](api_gateway_base_path_mapping.html).
-
-Internally API Gateway creates a CloudFront distribution to
-route requests on the given hostname. In addition to this resource
-it's necessary to create a DNS record corresponding to the
-given domain name which is an alias (either Route53 alias or
-traditional CNAME) to the Cloudfront domain name exported in the
-`cloudfront_domain_name` attribute.
-
-~> **Note:** All arguments including the private key will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_domain_name" "example" {
- domain_name = "api.example.com"
-
- certificate_name = "example-api"
- certificate_body = "${file("${path.module}/example.com/example.crt")}"
- certificate_chain = "${file("${path.module}/example.com/ca.crt")}"
- certificate_private_key = "${file("${path.module}/example.com/example.key")}"
-}
-
-# Example DNS record using Route53.
-# Route53 is not specifically required; any DNS host can be used.
-resource "aws_route53_record" "example" {
- zone_id = "${aws_route53_zone.example.id}" # See aws_route53_zone for how to create this
-
- name = "${aws_api_gateway_domain_name.example.domain_name}"
- type = "A"
-
- alias {
- name = "${aws_api_gateway_domain_name.example.cloudfront_domain_name}"
- zone_id = "${aws_api_gateway_domain_name.example.cloudfront_zone_id}"
- evaluate_target_health = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain_name` - (Required) The fully-qualified domain name to register
-* `certificate_name` - (Optional) The unique name to use when registering this
- cert as an IAM server certificate. Conflicts with `certificate_arn`.
-* `certificate_body` - (Optional) The certificate issued for the domain name
- being registered, in PEM format. Conflicts with `certificate_arn`.
-* `certificate_chain` - (Optional) The certificate for the CA that issued the
- certificate, along with any intermediate CA certificates required to
- create an unbroken chain to a certificate trusted by the intended API clients. Conflicts with `certificate_arn`.
-* `certificate_private_key` - (Optional) The private key associated with the
- domain certificate given in `certificate_body`. Conflicts with `certificate_arn`.
-* `certificate_arn` - (Optional) The ARN for an AWS-managed certificate. Conflicts with `certificate_name`, `certificate_body`, `certificate_chain` and `certificate_private_key`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The internal id assigned to this domain name by API Gateway.
-* `certificate_upload_date` - The upload date associated with the domain certificate.
-* `cloudfront_domain_name` - The hostname created by Cloudfront to represent
- the distribution that implements this domain name mapping.
-* `cloudfront_zone_id` - For convenience, the hosted zone id (`Z2FDTNDATAQYW2`)
- that can be used to create a Route53 alias record for the distribution.
diff --git a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown b/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown
deleted file mode 100644
index 836c55f46..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_integration.html.markdown
+++ /dev/null
@@ -1,148 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_integration"
-sidebar_current: "docs-aws-resource-api-gateway-integration"
-description: |-
- Provides an HTTP Method Integration for an API Gateway Integration.
----
-
-# aws\_api\_gateway\_integration
-
-Provides an HTTP Method Integration for an API Gateway Integration.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_resource" "MyDemoResource" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}"
- path_part = "mydemoresource"
-}
-
-resource "aws_api_gateway_method" "MyDemoMethod" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-
-resource "aws_api_gateway_integration" "MyDemoIntegration" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- type = "MOCK"
-
- request_parameters = {
- "integration.request.header.X-Authorization" = "'static'"
- }
-
- # Transforms the incoming XML request to JSON
- request_templates {
- "application/xml" = < **Note:** Depends on having `aws_api_gateway_integration` inside your rest api. To ensure this
-you might need to add an explicit `depends_on` for clean runs.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_resource" "MyDemoResource" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}"
- path_part = "mydemoresource"
-}
-
-resource "aws_api_gateway_method" "MyDemoMethod" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-
-resource "aws_api_gateway_integration" "MyDemoIntegration" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- type = "MOCK"
-}
-
-resource "aws_api_gateway_method_response" "200" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- status_code = "200"
-}
-
-resource "aws_api_gateway_integration_response" "MyDemoIntegrationResponse" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- status_code = "${aws_api_gateway_method_response.200.status_code}"
-
- # Transforms the backend JSON response to XML
- response_templates {
- "application/xml" = <
-
- $inputRoot.body
-
-EOF
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rest_api_id` - (Required) The ID of the associated REST API
-* `resource_id` - (Required) The API resource ID
-* `http_method` - (Required) The HTTP method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`)
-* `status_code` - (Required) The HTTP status code
-* `selection_pattern` - (Optional) Specifies the regular expression pattern used to choose
- an integration response based on the response from the backend.
- If the backend is an `AWS` Lambda function, the AWS Lambda function error header is matched.
- For all other `HTTP` and `AWS` backends, the HTTP status code is matched.
-* `response_templates` - (Optional) A map specifying the templates used to transform the integration response body
-* `response_parameters` - (Optional) A map of response parameters that can be read from the backend response.
- For example: `response_parameters = { "method.response.header.X-Some-Header" = "integration.response.header.X-Some-Other-Header" }`,
-* `response_parameters_in_json` - **Deprecated**, use `response_parameters` instead.
-* `content_handling` - (Optional) Specifies how to handle request payload content type conversions. Supported values are `CONVERT_TO_BINARY` and `CONVERT_TO_TEXT`. If this property is not defined, the response payload will be passed through from the integration response to the method response without modification.
diff --git a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method.html.markdown
deleted file mode 100644
index fd87fb850..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_method.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_method"
-sidebar_current: "docs-aws-resource-api-gateway-method"
-description: |-
- Provides an HTTP Method for an API Gateway Resource.
----
-
-# aws\_api\_gateway\_method
-
-Provides an HTTP Method for an API Gateway Resource.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_resource" "MyDemoResource" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}"
- path_part = "mydemoresource"
-}
-
-resource "aws_api_gateway_method" "MyDemoMethod" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rest_api_id` - (Required) The ID of the associated REST API
-* `resource_id` - (Required) The API resource ID
-* `http_method` - (Required) The HTTP Method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`)
-* `authorization` - (Required) The type of authorization used for the method (`NONE`, `CUSTOM`, `AWS_IAM`)
-* `authorizer_id` - (Optional) The authorizer id to be used when the authorization is `CUSTOM`
-* `api_key_required` - (Optional) Specify if the method requires an API key
-* `request_models` - (Optional) A map of the API models used for the request's content type
- where key is the content type (e.g. `application/json`)
- and value is either `Error`, `Empty` (built-in models) or `aws_api_gateway_model`'s `name`.
-* `request_parameters` - (Optional) A map of request query string parameters and headers that should be passed to the integration.
- For example: `request_parameters = { "method.request.header.X-Some-Header" = true }`
- would define that the header `X-Some-Header` must be provided on the request.
-* `request_parameters_in_json` - **Deprecated**, use `request_parameters` instead.
diff --git a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown
deleted file mode 100644
index 7e6041ab8..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_method_response.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_method_response"
-sidebar_current: "docs-aws-resource-api-gateway-method-response"
-description: |-
- Provides an HTTP Method Response for an API Gateway Resource.
----
-
-# aws\_api\_gateway\_method\_response
-
-Provides an HTTP Method Response for an API Gateway Resource.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "MyDemoAPI" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_resource" "MyDemoResource" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- parent_id = "${aws_api_gateway_rest_api.MyDemoAPI.root_resource_id}"
- path_part = "mydemoresource"
-}
-
-resource "aws_api_gateway_method" "MyDemoMethod" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-
-resource "aws_api_gateway_integration" "MyDemoIntegration" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- type = "MOCK"
-}
-
-resource "aws_api_gateway_method_response" "200" {
- rest_api_id = "${aws_api_gateway_rest_api.MyDemoAPI.id}"
- resource_id = "${aws_api_gateway_resource.MyDemoResource.id}"
- http_method = "${aws_api_gateway_method.MyDemoMethod.http_method}"
- status_code = "200"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rest_api_id` - (Required) The ID of the associated REST API
-* `resource_id` - (Required) The API resource ID
-* `http_method` - (Required) The HTTP Method (`GET`, `POST`, `PUT`, `DELETE`, `HEAD`, `OPTIONS`, `ANY`)
-* `status_code` - (Required) The HTTP status code
-* `response_models` - (Optional) A map of the API models used for the response's content type
-* `response_parameters` - (Optional) A map of response parameters that can be sent to the caller.
- For example: `response_parameters = { "method.response.header.X-Some-Header" = true }`
- would define that the header `X-Some-Header` can be provided on the response.
-* `response_parameters_in_json` - **Deprecated**, use `response_parameters` instead.
diff --git a/website/source/docs/providers/aws/r/api_gateway_method_settings.html.markdown b/website/source/docs/providers/aws/r/api_gateway_method_settings.html.markdown
deleted file mode 100644
index 852860616..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_method_settings.html.markdown
+++ /dev/null
@@ -1,93 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_method_settings"
-sidebar_current: "docs-aws-resource-api-gateway-method-settings"
-description: |-
- Provides an API Gateway Method Settings, e.g. logging or monitoring.
----
-
-# aws\_api\_gateway\_method\_settings
-
-Provides an API Gateway Method Settings, e.g. logging or monitoring.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_method_settings" "s" {
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- stage_name = "${aws_api_gateway_stage.test.stage_name}"
- method_path = "${aws_api_gateway_resource.test.path_part}/${aws_api_gateway_method.test.http_method}"
-
- settings {
- metrics_enabled = true
- logging_level = "INFO"
- }
-}
-
-resource "aws_api_gateway_rest_api" "test" {
- name = "MyDemoAPI"
- description = "This is my API for demonstration purposes"
-}
-
-resource "aws_api_gateway_deployment" "test" {
- depends_on = ["aws_api_gateway_integration.test"]
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- stage_name = "dev"
-}
-
-resource "aws_api_gateway_stage" "test" {
- stage_name = "prod"
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- deployment_id = "${aws_api_gateway_deployment.test.id}"
-}
-
-resource "aws_api_gateway_resource" "test" {
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- parent_id = "${aws_api_gateway_rest_api.test.root_resource_id}"
- path_part = "mytestresource"
-}
-
-resource "aws_api_gateway_method" "test" {
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- resource_id = "${aws_api_gateway_resource.test.id}"
- http_method = "GET"
- authorization = "NONE"
-}
-
-resource "aws_api_gateway_integration" "test" {
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- resource_id = "${aws_api_gateway_resource.test.id}"
- http_method = "${aws_api_gateway_method.test.http_method}"
- type = "MOCK"
-
- request_templates {
- "application/xml" = <
-```
diff --git a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown b/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown
deleted file mode 100644
index 7b1fdf767..000000000
--- a/website/source/docs/providers/aws/r/api_gateway_usage_plan_key.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_api_gateway_usage_plan_key"
-sidebar_current: "docs-aws-resource-api-gateway-usage-plan-key"
-description: |-
- Provides an API Gateway Usage Plan Key.
----
-
-# aws_api_gateway_usage_plan_key
-
-Provides an API Gateway Usage Plan Key.
-
-## Example Usage
-
-```hcl
-resource "aws_api_gateway_rest_api" "test" {
- name = "MyDemoAPI"
-}
-
-...
-
-resource "aws_api_gateway_usage_plan" "myusageplan" {
- name = "my_usage_plan"
-}
-
-resource "aws_api_gateway_api_key" "mykey" {
- name = "my_key"
-
- stage_key {
- rest_api_id = "${aws_api_gateway_rest_api.test.id}"
- stage_name = "${aws_api_gateway_deployment.foo.stage_name}"
- }
-}
-
-resource "aws_api_gateway_usage_plan_key" "main" {
- key_id = "${aws_api_gateway_api_key.mykey.id}"
- key_type = "API_KEY"
- usage_plan_id = "${aws_api_gateway_usage_plan.myusageplan.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `key_id` - (Required) The identifier of the API key resource.
-* `key_type` - (Required) The type of the API key resource. Currently, the valid key type is API_KEY.
-* `usage_plan_id` - (Required) The ID of the usage plan resource that the key should be associated with.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Id of a usage plan key.
-* `key_id` - The identifier of the API key resource.
-* `key_type` - The type of the usage plan key. Currently, the valid key type is API_KEY.
-* `usage_plan_id` - The ID of the usage plan resource.
-* `name` - The name of a usage plan key.
-* `value` - The value of a usage plan key.
diff --git a/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown
deleted file mode 100644
index 8184db371..000000000
--- a/website/source/docs/providers/aws/r/app_cookie_stickiness_policy.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_app_cookie_stickiness_policy"
-sidebar_current: "docs-aws-resource-app-cookie-stickiness-policy"
-description: |-
- Provides an application cookie stickiness policy, which allows an ELB to wed its stickiness cookie to a cookie generated by your application.
----
-
-# aws\_app\_cookie\_stickiness\_policy
-
-Provides an application cookie stickiness policy, which allows an ELB to wed its sticky cookie's expiration to a cookie generated by your application.
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "lb" {
- name = "test-lb"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 8000
- instance_protocol = "http"
- lb_port = 80
- lb_protocol = "http"
- }
-}
-
-resource "aws_app_cookie_stickiness_policy" "foo" {
- name = "foo_policy"
- load_balancer = "${aws_elb.lb.name}"
- lb_port = 80
- cookie_name = "MyAppCookie"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the stickiness policy.
-* `load_balancer` - (Required) The name of load balancer to which the policy
- should be attached.
-* `lb_port` - (Required) The load balancer port to which the policy
- should be applied. This must be an active listener on the load
-balancer.
-* `cookie_name` - (Required) The application cookie whose lifetime the ELB's cookie should follow.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `name` - The name of the stickiness policy.
-* `load_balancer` - The name of load balancer to which the policy is attached.
-* `lb_port` - The load balancer port to which the policy is applied.
-* `cookie_name` - The application cookie whose lifetime the ELB's cookie should follow.
diff --git a/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown b/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown
deleted file mode 100644
index ff1465ed4..000000000
--- a/website/source/docs/providers/aws/r/appautoscaling_policy.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_appautoscaling_policy"
-sidebar_current: "docs-aws-resource-appautoscaling-policy"
-description: |-
- Provides an Application AutoScaling Policy resource.
----
-
-# aws\_appautoscaling\_policy
-
-Provides an Application AutoScaling Policy resource.
-
-## Example Usage
-
-```hcl
-resource "aws_appautoscaling_target" "ecs_target" {
- max_capacity = 4
- min_capacity = 1
- resource_id = "service/clusterName/serviceName"
- role_arn = "${var.ecs_iam_role}"
- scalable_dimension = "ecs:service:DesiredCount"
- service_namespace = "ecs"
-}
-
-resource "aws_appautoscaling_policy" "ecs_policy" {
- adjustment_type = "ChangeInCapacity"
- cooldown = 60
- metric_aggregation_type = "Maximum"
- name = "scale-down"
- resource_id = "service/clusterName/serviceName"
- scalable_dimension = "ecs:service:DesiredCount"
- service_namespace = "ecs"
-
- step_adjustment {
- metric_interval_upper_bound = 0
- scaling_adjustment = -1
- }
-
- depends_on = ["aws_appautoscaling_target.ecs_target"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `adjustment_type` - (Required) Specifies whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are `ChangeInCapacity`, `ExactCapacity`, and `PercentChangeInCapacity`.
-* `cooldown` - (Required) The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
-* `metric_aggregation_type` - (Required) The aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average".
-* `name` - (Required) The name of the policy.
-* `policy_type` - (Optional) Defaults to "StepScaling" because it is the only option available.
-* `resource_id` - (Required) The resource type and unique identifier string for the resource associated with the scaling policy. For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, such as `service/default/sample-webapp`. For Amazon EC2 Spot fleet requests, the resource type is `spot-fleet-request`, and the identifier is the Spot fleet request ID; for example, `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
-* `scalable_dimension` - (Required) The scalable dimension of the scalable target. The scalable dimension contains the service namespace, resource type, and scaling property, such as `ecs:service:DesiredCount` for the desired task count of an Amazon ECS service, or `ec2:spot-fleet-request:TargetCapacity` for the target capacity of an Amazon EC2 Spot fleet request.
-* `service_namespace` - (Required) The AWS service namespace of the scalable target. Valid values are `ecs` for Amazon ECS services and `ec2` for Amazon EC2 Spot fleet requests.
-* `step_adjustment` - (Optional) A set of adjustments that manage scaling. These have the following structure:
-
- ```hcl
- step_adjustment {
- metric_interval_lower_bound = 1.0
- metric_interval_upper_bound = 2.0
- scaling_adjustment = -1
- }
- step_adjustment {
- metric_interval_lower_bound = 2.0
- metric_interval_upper_bound = 3.0
- scaling_adjustment = 1
- }
- ```
-
- * `metric_interval_lower_bound` - (Optional) The lower bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity.
- * `metric_interval_upper_bound` - (Optional) The upper bound for the difference between the alarm threshold and the CloudWatch metric. Without a value, AWS will treat this bound as infinity. The upper bound must be greater than the lower bound.
- * `scaling_adjustment` - (Required) The number of members by which to scale, when the adjustment bounds are breached. A positive value scales up. A negative value scales down.
-
-## Attribute Reference
-* `adjustment_type` - The scaling policy's adjustment type.
-* `arn` - The ARN assigned by AWS to the scaling policy.
-* `name` - The scaling policy's name.
-* `policy_type` - The scaling policy's type.
diff --git a/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown b/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown
deleted file mode 100644
index c52834977..000000000
--- a/website/source/docs/providers/aws/r/appautoscaling_target.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_appautoscaling_target"
-sidebar_current: "docs-aws-resource-appautoscaling-target"
-description: |-
- Provides an Application AutoScaling ScalableTarget resource.
----
-
-# aws\_appautoscaling\_target
-
-Provides an Application AutoScaling ScalableTarget resource.
-
-## Example Usage
-
-```hcl
-resource "aws_appautoscaling_target" "ecs_target" {
- max_capacity = 4
- min_capacity = 1
- resource_id = "service/clusterName/serviceName"
- role_arn = "${var.ecs_iam_role}"
- scalable_dimension = "ecs:service:DesiredCount"
- service_namespace = "ecs"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `max_capacity` - (Required) The max capacity of the scalable target.
-* `min_capacity` - (Required) The min capacity of the scalable target.
-* `resource_id` - (Required) The resource type and unique identifier string for the resource associated with the scalable target.
-For Amazon ECS services, this value is the resource type, followed by the cluster name and service name, for example, `service/default/sample-webapp`.
-For Amazon EC2 Spot fleet requests, the resource type is `spot-fleet-request`, and the identifier is the Spot fleet request ID; for example, `spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE`.
-For Amazon EMR Cluster Instance Groups, the resource type is `instancegroup`, the identifier is the Cluster Id and the Instance Group Id; for example, `instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0`.
-* `role_arn` - (Required) The ARN of the IAM role that allows Application
-AutoScaling to modify your scalable target on your behalf.
-* `scalable_dimension` - (Required) The scalable dimension of the scalable target. The scalable dimension contains the service namespace, resource
-type, and scaling property, such as `ecs:service:DesiredCount` for the desired task count of an Amazon ECS service,
-`ec2:spot-fleet-request:TargetCapacity` for the target capacity of an Amazon EC2 Spot fleet request or
-`elasticmapreduce:instancegroup:InstanceCount` for the Instance count of an EMR Cluster Instance Group.
-* `service_namespace` - (Required) The AWS service namespace of the scalable target.
-Valid values are `ecs` for Amazon ECS services, `ec2` for Amazon EC2 Spot fleet requests and `elasticmapreduce` for Amazon EMR Clusters.
diff --git a/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown b/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown
deleted file mode 100644
index faf877815..000000000
--- a/website/source/docs/providers/aws/r/autoscaling_attachment.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_autoscaling_attachment"
-sidebar_current: "docs-aws-resource-autoscaling-attachment"
-description: |-
- Provides an AutoScaling Group Attachment resource.
----
-
-# aws\_autoscaling\_attachment
-
-Provides an AutoScaling Attachment resource.
-
-~> **NOTE on AutoScaling Groups and ASG Attachments:** Terraform currently provides
-both a standalone ASG Attachment resource (describing an ASG attached to
-an ELB), and an [AutoScaling Group resource](autoscaling_group.html) with
-`load_balancers` defined in-line. At this time you cannot use an ASG with in-line
-load balancers in conjunction with an ASG Attachment resource. Doing so will cause a
-conflict and will overwrite attachments.
-
-## Example Usage
-
-```hcl
-# Create a new load balancer attachment
-resource "aws_autoscaling_attachment" "asg_attachment_bar" {
- autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
- elb = "${aws_elb.bar.id}"
-}
-```
-
-```hcl
-# Create a new ALB Target Group attachment
-resource "aws_autoscaling_attachment" "asg_attachment_bar" {
- autoscaling_group_name = "${aws_autoscaling_group.asg.id}"
- alb_target_group_arn = "${aws_alb_target_group.test.arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `autoscaling_group_name` - (Required) Name of ASG to associate with the ELB.
-* `elb` - (Optional) The name of the ELB.
-* `alb_target_group_arn` - (Optional) The ARN of an ALB Target Group.
-
diff --git a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown b/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
deleted file mode 100644
index 21bafe716..000000000
--- a/website/source/docs/providers/aws/r/autoscaling_group.html.markdown
+++ /dev/null
@@ -1,290 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_autoscaling_group"
-sidebar_current: "docs-aws-resource-autoscaling-group"
-description: |-
- Provides an AutoScaling Group resource.
----
-
-# aws\_autoscaling\_group
-
-Provides an AutoScaling Group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_placement_group" "test" {
- name = "test"
- strategy = "cluster"
-}
-
-resource "aws_autoscaling_group" "bar" {
- availability_zones = ["us-east-1a"]
- name = "foobar3-terraform-test"
- max_size = 5
- min_size = 2
- health_check_grace_period = 300
- health_check_type = "ELB"
- desired_capacity = 4
- force_delete = true
- placement_group = "${aws_placement_group.test.id}"
- launch_configuration = "${aws_launch_configuration.foobar.name}"
-
- initial_lifecycle_hook {
- name = "foobar"
- default_result = "CONTINUE"
- heartbeat_timeout = 2000
- lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
-
- notification_metadata = < **NOTE:** When using `ELB` as the `health_check_type`, `health_check_grace_period` is required.
-
-~> **NOTE:** Terraform has two types of ways you can add lifecycle hooks - via
-the `initial_lifecycle_hook` attribute from this resource, or via the separate
-[`aws_autoscaling_lifecycle_hook`](/docs/providers/aws/r/autoscaling_lifecycle_hooks.html)
-resource. `initial_lifecycle_hook` exists here because any lifecycle hooks
-added with `aws_autoscaling_lifecycle_hook` will not be added until the
-autoscaling group has been created, and depending on your
-[capacity](#waiting-for-capacity) settings, after the initial instances have
-been launched, creating unintended behavior. If you need hooks to run on all
-instances, add them with `initial_lifecycle_hook` here, but take
-care to not duplicate these hooks in `aws_autoscaling_lifecycle_hook`.
-
-## Waiting for Capacity
-
-A newly-created ASG is initially empty and begins to scale to `min_size` (or
-`desired_capacity`, if specified) by launching instances using the provided
-Launch Configuration. These instances take time to launch and boot.
-
-On ASG Update, changes to these values also take time to result in the target
-number of instances providing service.
-
-Terraform provides two mechanisms to help consistently manage ASG scale up
-time across dependent resources.
-
-#### Waiting for ASG Capacity
-
-The first is default behavior. Terraform waits after ASG creation for
-`min_size` (or `desired_capacity`, if specified) healthy instances to show up
-in the ASG before continuing.
-
-If `min_size` or `desired_capacity` are changed in a subsequent update,
-Terraform will also wait for the correct number of healthy instances before
-continuing.
-
-Terraform considers an instance "healthy" when the ASG reports `HealthStatus:
-"Healthy"` and `LifecycleState: "InService"`. See the [AWS AutoScaling
-Docs](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/AutoScalingGroupLifecycle.html)
-for more information on an ASG's lifecycle.
-
-Terraform will wait for healthy instances for up to
-`wait_for_capacity_timeout`. If ASG creation is taking more than a few minutes,
-it's worth investigating for scaling activity errors, which can be caused by
-problems with the selected Launch Configuration.
-
-Setting `wait_for_capacity_timeout` to `"0"` disables ASG Capacity waiting.
-
-#### Waiting for ELB Capacity
-
-The second mechanism is optional, and affects ASGs with attached ELBs specified
-via the `load_balancers` attribute.
-
-The `min_elb_capacity` parameter causes Terraform to wait for at least the
-requested number of instances to show up `"InService"` in all attached ELBs
-during ASG creation. It has no effect on ASG updates.
-
-If `wait_for_elb_capacity` is set, Terraform will wait for exactly that number
-of Instances to be `"InService"` in all attached ELBs on both creation and
-updates.
-
-These parameters can be used to ensure that service is being provided before
-Terraform moves on. If new instances don't pass the ELB's health checks for any
-reason, the Terraform apply will time out, and the ASG will be marked as
-tainted (i.e. marked to be destroyed in a follow up run).
-
-As with ASG Capacity, Terraform will wait for up to `wait_for_capacity_timeout`
-for the proper number of instances to be healthy.
-
-#### Troubleshooting Capacity Waiting Timeouts
-
-If ASG creation takes more than a few minutes, this could indicate one of a
-number of configuration problems. See the [AWS Docs on Load Balancer
-Troubleshooting](https://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-troubleshooting.html)
-for more information.
-
-
-## Import
-
-AutoScaling Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_autoscaling_group.web web-asg
-```
diff --git a/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown b/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown
deleted file mode 100644
index 6a1d9468f..000000000
--- a/website/source/docs/providers/aws/r/autoscaling_lifecycle_hooks.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_autoscaling_lifecycle_hook"
-sidebar_current: "docs-aws-resource-autoscaling-lifecycle-hook"
-description: |-
- Provides an AutoScaling Lifecycle Hooks resource.
----
-
-# aws\_autoscaling\_lifecycle\_hook
-
-Provides an AutoScaling Lifecycle Hook resource.
-
-~> **NOTE:** Terraform has two types of ways you can add lifecycle hooks - via
-the `initial_lifecycle_hook` attribute from the
-[`aws_autoscaling_group`](/docs/providers/aws/r/autoscaling_group.html)
-resource, or via this one. Hooks added via this resource will not be added
-until the autoscaling group has been created, and depending on your
-[capacity](/docs/providers/aws/r/autoscaling_group.html#waiting-for-capacity)
-settings, after the initial instances have been launched, creating unintended
-behavior. If you need hooks to run on all instances, add them with
-`initial_lifecycle_hook` in
-[`aws_autoscaling_group`](/docs/providers/aws/r/autoscaling_group.html),
-but take care to not duplicate those hooks with this resource.
-
-## Example Usage
-
-```hcl
-resource "aws_autoscaling_group" "foobar" {
- availability_zones = ["us-west-2a"]
- name = "terraform-test-foobar5"
- health_check_type = "EC2"
- termination_policies = ["OldestInstance"]
-
- tag {
- key = "Foo"
- value = "foo-bar"
- propagate_at_launch = true
- }
-}
-
-resource "aws_autoscaling_lifecycle_hook" "foobar" {
- name = "foobar"
- autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
- default_result = "CONTINUE"
- heartbeat_timeout = 2000
- lifecycle_transition = "autoscaling:EC2_INSTANCE_LAUNCHING"
-
- notification_metadata = < **NOTE:** You may want to omit `desired_capacity` attribute from attached `aws_autoscaling_group`
-when using autoscaling policies. It's good practice to pick either
-[manual](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-manual-scaling.html)
-or [dynamic](https://docs.aws.amazon.com/AutoScaling/latest/DeveloperGuide/as-scale-based-on-demand.html)
-(policy-based) scaling.
-
-## Example Usage
-
-```hcl
-resource "aws_autoscaling_policy" "bat" {
- name = "foobar3-terraform-test"
- scaling_adjustment = 4
- adjustment_type = "ChangeInCapacity"
- cooldown = 300
- autoscaling_group_name = "${aws_autoscaling_group.bar.name}"
-}
-
-resource "aws_autoscaling_group" "bar" {
- availability_zones = ["us-east-1a"]
- name = "foobar3-terraform-test"
- max_size = 5
- min_size = 2
- health_check_grace_period = 300
- health_check_type = "ELB"
- force_delete = true
- launch_configuration = "${aws_launch_configuration.foo.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the policy.
-* `autoscaling_group_name` - (Required) The name or ARN of the group.
-* `adjustment_type` - (Required) Specifies whether the adjustment is an absolute number or a percentage of the current capacity. Valid values are `ChangeInCapacity`, `ExactCapacity`, and `PercentChangeInCapacity`.
-* `policy_type` - (Optional) The policy type, either "SimpleScaling" or "StepScaling". If this value isn't provided, AWS will default to "SimpleScaling."
-
-The following arguments are only available to "SimpleScaling" type policies:
-
-* `cooldown` - (Optional) The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start.
-* `scaling_adjustment` - (Optional) The number of instances by which to scale. `adjustment_type` determines the interpretation of this number (e.g., as an absolute number or as a percentage of the existing Auto Scaling group size). A positive increment adds to the current capacity and a negative value removes from the current capacity.
-
-The following arguments are only available to "StepScaling" type policies:
-
-* `metric_aggregation_type` - (Optional) The aggregation type for the policy's metrics. Valid values are "Minimum", "Maximum", and "Average". Without a value, AWS will treat the aggregation type as "Average".
-* `estimated_instance_warmup` - (Optional) The estimated time, in seconds, until a newly launched instance will contribute CloudWatch metrics. Without a value, AWS will default to the group's specified cooldown period.
-* `step_adjustments` - (Optional) A set of adjustments that manage
-group scaling. These have the following structure:
-
-```hcl
-step_adjustment {
- scaling_adjustment = -1
- metric_interval_lower_bound = 1.0
- metric_interval_upper_bound = 2.0
-}
-step_adjustment {
- scaling_adjustment = 1
- metric_interval_lower_bound = 2.0
- metric_interval_upper_bound = 3.0
-}
-```
-
-The following fields are available in step adjustments:
-
-* `scaling_adjustment` - (Required) The number of members by which to
-scale, when the adjustment bounds are breached. A positive value scales
-up. A negative value scales down.
-* `metric_interval_lower_bound` - (Optional) The lower bound for the
-difference between the alarm threshold and the CloudWatch metric.
-Without a value, AWS will treat this bound as infinity.
-* `metric_interval_upper_bound` - (Optional) The upper bound for the
-difference between the alarm threshold and the CloudWatch metric.
-Without a value, AWS will treat this bound as infinity. The upper bound
-must be greater than the lower bound.
-
-The following arguments are supported for backwards compatibility but should not be used:
-
-* `min_adjustment_step` - (Optional) Use `min_adjustment_magnitude` instead.
-
-## Attribute Reference
-* `arn` - The ARN assigned by AWS to the scaling policy.
-* `name` - The scaling policy's name.
-* `autoscaling_group_name` - The scaling policy's assigned autoscaling group.
-* `adjustment_type` - The scaling policy's adjustment type.
-* `policy_type` - The scaling policy's type.
diff --git a/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown b/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown
deleted file mode 100644
index 1392361ac..000000000
--- a/website/source/docs/providers/aws/r/autoscaling_schedule.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_autoscaling_schedule"
-sidebar_current: "docs-aws-resource-autoscaling-schedule"
-description: |-
- Provides an AutoScaling Schedule resource.
----
-
-# aws\_autoscaling\_schedule
-
-Provides an AutoScaling Schedule resource.
-
-## Example Usage
-
-```hcl
-resource "aws_autoscaling_group" "foobar" {
- availability_zones = ["us-west-2a"]
- name = "terraform-test-foobar5"
- max_size = 1
- min_size = 1
- health_check_grace_period = 300
- health_check_type = "ELB"
- force_delete = true
- termination_policies = ["OldestInstance"]
-}
-
-resource "aws_autoscaling_schedule" "foobar" {
- scheduled_action_name = "foobar"
- min_size = 0
- max_size = 1
- desired_capacity = 0
- start_time = "2016-12-11T18:00:00Z"
- end_time = "2016-12-12T06:00:00Z"
- autoscaling_group_name = "${aws_autoscaling_group.foobar.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `autoscaling_group_name` - (Required) The name or Amazon Resource Name (ARN) of the Auto Scaling group.
-* `scheduled_action_name` - (Required) The name of this scaling action.
-* `start_time` - (Optional) The time for this action to start, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).
- If you try to schedule your action in the past, Auto Scaling returns an error message.
-* `end_time` - (Optional) The time for this action to end, in "YYYY-MM-DDThh:mm:ssZ" format in UTC/GMT only (for example, 2014-06-01T00:00:00Z ).
- If you try to schedule your action in the past, Auto Scaling returns an error message.
-* `recurrence` - (Optional) The time when recurring future actions will start. Start time is specified by the user following the Unix cron syntax format.
-* `min_size` - (Optional) The minimum size for the Auto Scaling group. Default
-0.
-* `max_size` - (Optional) The maximum size for the Auto Scaling group. Default
-0.
-* `desired_capacity` - (Optional) The number of EC2 instances that should be running in the group. Default 0.
-
-~> **NOTE:** When `start_time` and `end_time` are specified with `recurrence` , they form the boundaries of when the recurring action will start and stop.
-
-## Attribute Reference
-* `arn` - The ARN assigned by AWS to the autoscaling schedule.
diff --git a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown b/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown
deleted file mode 100644
index 50cacf6be..000000000
--- a/website/source/docs/providers/aws/r/cloudformation_stack.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudformation_stack"
-sidebar_current: "docs-aws-resource-cloudformation-stack"
-description: |-
- Provides a CloudFormation Stack resource.
----
-
-# aws\_cloudformation\_stack
-
-Provides a CloudFormation Stack resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudformation_stack" "network" {
- name = "networking-stack"
-
- parameters {
- VPCCidr = "10.0.0.0/16"
- }
-
- template_body = < **NOTE:** CloudFront distributions take about 15 minutes to a deployed state
-after creation or modification. During this time, deletes to resources will be
-blocked. If you need to delete a distribution that is enabled and you do not
-want to wait, you need to use the `retain_on_delete` flag.
-
-## Example Usage
-
-The following example below creates a CloudFront distribution with an S3 origin.
-
-```hcl
-resource "aws_s3_bucket" "b" {
- bucket = "mybucket"
- acl = "private"
-
- tags {
- Name = "My bucket"
- }
-}
-
-resource "aws_cloudfront_distribution" "s3_distribution" {
- origin {
- domain_name = "${aws_s3_bucket.b.bucket_domain_name}"
- origin_id = "myS3Origin"
-
- s3_origin_config {
- origin_access_identity = "origin-access-identity/cloudfront/ABCDEFG1234567"
- }
- }
-
- enabled = true
- is_ipv6_enabled = true
- comment = "Some comment"
- default_root_object = "index.html"
-
- logging_config {
- include_cookies = false
- bucket = "mylogs.s3.amazonaws.com"
- prefix = "myprefix"
- }
-
- aliases = ["mysite.example.com", "yoursite.example.com"]
-
- default_cache_behavior {
- allowed_methods = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
- cached_methods = ["GET", "HEAD"]
- target_origin_id = "myS3Origin"
-
- forwarded_values {
- query_string = false
-
- cookies {
- forward = "none"
- }
- }
-
- viewer_protocol_policy = "allow-all"
- min_ttl = 0
- default_ttl = 3600
- max_ttl = 86400
- }
-
- price_class = "PriceClass_200"
-
- restrictions {
- geo_restriction {
- restriction_type = "whitelist"
- locations = ["US", "CA", "GB", "DE"]
- }
- }
-
- tags {
- Environment = "production"
- }
-
- viewer_certificate {
- cloudfront_default_certificate = true
- }
-}
-```
-
-## Argument Reference
-
-The CloudFront distribution argument layout is a complex structure composed
-of several sub-resources - these resources are laid out below.
-
-### Top-Level Arguments
-
- * `aliases` (Optional) - Extra CNAMEs (alternate domain names), if any, for
- this distribution.
-
- * `cache_behavior` (Optional) - A [cache behavior](#cache-behavior-arguments)
- resource for this distribution (multiples allowed).
-
- * `comment` (Optional) - Any comments you want to include about the
- distribution.
-
- * `custom_error_response` (Optional) - One or more [custom error response](#custom-error-response-arguments) elements (multiples allowed).
-
- * `default_cache_behavior` (Required) - The [default cache behavior](#default-cache-behavior-arguments) for this distribution (maximum
- one).
-
- * `default_root_object` (Optional) - The object that you want CloudFront to
- return (for example, index.html) when an end user requests the root URL.
-
- * `enabled` (Required) - Whether the distribution is enabled to accept end
- user requests for content.
-
- * `is_ipv6_enabled` (Optional) - Whether the IPv6 is enabled for the distribution.
-
- * `http_version` (Optional) - The maximum HTTP version to support on the
- distribution. Allowed values are `http1.1` and `http2`. The default is
- `http2`.
-
- * `logging_config` (Optional) - The [logging
- configuration](#logging-config-arguments) that controls how logs are written
- to your distribution (maximum one).
-
- * `origin` (Required) - One or more [origins](#origin-arguments) for this
- distribution (multiples allowed).
-
- * `price_class` (Optional) - The price class for this distribution. One of
- `PriceClass_All`, `PriceClass_200`, `PriceClass_100`
-
- * `restrictions` (Required) - The [restriction
- configuration](#restrictions-arguments) for this distribution (maximum one).
-
- * `tags` - (Optional) A mapping of tags to assign to the resource.
-
- * `viewer_certificate` (Required) - The [SSL
- configuration](#viewer-certificate-arguments) for this distribution (maximum
- one).
-
- * `web_acl_id` (Optional) - If you're using AWS WAF to filter CloudFront
- requests, the Id of the AWS WAF web ACL that is associated with the
- distribution.
-
- * `retain_on_delete` (Optional) - Disables the distribution instead of
- deleting it when destroying the resource through Terraform. If this is set,
- the distribution needs to be deleted manually afterwards. Default: `false`.
-
-#### Cache Behavior Arguments
-
- * `allowed_methods` (Required) - Controls which HTTP methods CloudFront
- processes and forwards to your Amazon S3 bucket or your custom origin.
-
- * `cached_methods` (Required) - Controls whether CloudFront caches the
- response to requests using the specified HTTP methods.
-
- * `compress` (Optional) - Whether you want CloudFront to automatically
- compress content for web requests that include `Accept-Encoding: gzip` in
- the request header (default: `false`).
-
- * `default_ttl` (Required) - The default amount of time (in seconds) that an
- object is in a CloudFront cache before CloudFront forwards another request
- in the absence of an `Cache-Control max-age` or `Expires` header.
-
- * `forwarded_values` (Required) - The [forwarded values configuration](#forwarded-values-arguments) that specifies how CloudFront
- handles query strings, cookies and headers (maximum one).
-
- * `lambda_function_association` (Optional) - A config block that triggers a lambda function with
- specific actions. Defined below, maximum 4. **Lambda@Edge is in technical
- Preview, and must be enabled on your AWS account to be used**
-
- * `max_ttl` (Required) - The maximum amount of time (in seconds) that an
- object is in a CloudFront cache before CloudFront forwards another request
- to your origin to determine whether the object has been updated. Only
- effective in the presence of `Cache-Control max-age`, `Cache-Control
- s-maxage`, and `Expires` headers.
-
- * `min_ttl` (Required) - The minimum amount of time that you want objects to
- stay in CloudFront caches before CloudFront queries your origin to see
- whether the object has been updated.
-
- * `path_pattern` (Required) - The pattern (for example, `images/*.jpg)` that
- specifies which requests you want this cache behavior to apply to.
-
- * `smooth_streaming` (Optional) - Indicates whether you want to distribute
- media files in Microsoft Smooth Streaming format using the origin that is
- associated with this cache behavior.
-
- * `target_origin_id` (Required) - The value of ID for the origin that you want
- CloudFront to route requests to when a request matches the path pattern
- either for a cache behavior or for the default cache behavior.
-
- * `trusted_signers` (Optional) - The AWS accounts, if any, that you want to
- allow to create signed URLs for private content.
-
- * `viewer_protocol_policy` (Required) - Use this element to specify the
- protocol that users can use to access the files in the origin specified by
- TargetOriginId when a request matches the path pattern in PathPattern. One
- of `allow-all`, `https-only`, or `redirect-to-https`.
-
-##### Forwarded Values Arguments
-
- * `cookies` (Required) - The [forwarded values cookies](#cookies-arguments)
- that specifies how CloudFront handles cookies (maximum one).
-
- * `headers` (Optional) - Specifies the Headers, if any, that you want
- CloudFront to vary upon for this cache behavior. Specify `*` to include all
- headers.
-
- * `query_string` (Required) - Indicates whether you want CloudFront to forward
- query strings to the origin that is associated with this cache behavior.
-
- * `query_string_cache_keys` (Optional) - When specified, along with a value of
- `true` for `query_string`, all query strings are forwarded, however only the
- query string keys listed in this argument are cached. When omitted with a
- value of `true` for `query_string`, all query string keys are cached.
-
-##### Lambda Function Association
-
-Lambda@Edge allows you to associate an AWS Lambda Function with a predefined
-event. You can associate a single function per event type. See [What is
-Lambda@Edge](http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/what-is-lambda-at-edge.html)
-for more information
-
- * `event_type` (Required) - The specific event to trigger this function.
- Valid values: `viewer-request`, `origin-request`, `viewer-response`,
- `origin-response`
-
- * `lambda_arn` (Required) - ARN of the Lambda function.
-
-##### Cookies Arguments
-
- * `forward` (Required) - Specifies whether you want CloudFront to forward
- cookies to the origin that is associated with this cache behavior. You can
- specify `all`, `none` or `whitelist`. If `whitelist`, you must include the
- subsequent `whitelisted_names`
-
- * `whitelisted_names` (Optional) - If you have specified `whitelist` to
- `forward`, the whitelisted cookies that you want CloudFront to forward to
- your origin.
-
-#### Custom Error Response Arguments
-
- * `error_caching_min_ttl` (Optional) - The minimum amount of time you want
- HTTP error codes to stay in CloudFront caches before CloudFront queries your
- origin to see whether the object has been updated.
-
- * `error_code` (Required) - The 4xx or 5xx HTTP status code that you want to
- customize.
-
- * `response_code` (Optional) - The HTTP status code that you want CloudFront
- to return with the custom error page to the viewer.
-
- * `response_page_path` (Optional) - The path of the custom error page (for
- example, `/custom_404.html`).
-
-#### Default Cache Behavior Arguments
-
-The arguments for `default_cache_behavior` are the same as for
-[`cache_behavior`](#cache-behavior-arguments), except for the `path_pattern`
-argument is not required.
-
-#### Logging Config Arguments
-
- * `bucket` (Required) - The Amazon S3 bucket to store the access logs in, for
- example, `myawslogbucket.s3.amazonaws.com`.
-
- * `include_cookies` (Optional) - Specifies whether you want CloudFront to
- include cookies in access logs (default: `false`).
-
- * `prefix` (Optional) - An optional string that you want CloudFront to prefix
- to the access log filenames for this distribution, for example, `myprefix/`.
-
-#### Origin Arguments
-
- * `custom_origin_config` - The [CloudFront custom
- origin](#custom-origin-config-arguments) configuration information. If an S3
- origin is required, use `s3_origin_config` instead.
-
- * `domain_name` (Required) - The DNS domain name of either the S3 bucket, or
- web site of your custom origin.
-
- * `custom_header` (Optional) - One or more sub-resources with `name` and
- `value` parameters that specify header data that will be sent to the origin
- (multiples allowed).
-
- * `origin_id` (Required) - A unique identifier for the origin.
-
- * `origin_path` (Optional) - An optional element that causes CloudFront to
- request your content from a directory in your Amazon S3 bucket or your
- custom origin.
-
- * `s3_origin_config` - The [CloudFront S3 origin](#s3-origin-config-arguments)
- configuration information. If a custom origin is required, use
- `custom_origin_config` instead.
-
-##### Custom Origin Config Arguments
-
- * `http_port` (Required) - The HTTP port the custom origin listens on.
-
- * `https_port` (Required) - The HTTPS port the custom origin listens on.
-
- * `origin_protocol_policy` (Required) - The origin protocol policy to apply to
- your origin. One of `http-only`, `https-only`, or `match-viewer`.
-
- * `origin_ssl_protocols` (Required) - The SSL/TLS protocols that you want
- CloudFront to use when communicating with your origin over HTTPS. A list of
- one or more of `SSLv3`, `TLSv1`, `TLSv1.1`, and `TLSv1.2`.
-
- * `origin_keepalive_timeout` - (Optional) The Custom KeepAlive timeout, in seconds. Value must be between `1` and `60`.
-
- * `origin_read_timeout` - (Optional) The Custom Read timeout, in seconds. Value must be between `4` and `60`.
-
-##### S3 Origin Config Arguments
-
-* `origin_access_identity` (Optional) - The [CloudFront origin access
- identity][5] to associate with the origin.
-
-#### Restrictions Arguments
-
-The `restrictions` sub-resource takes another single sub-resource named
-`geo_restriction` (see the example for usage).
-
-The arguments of `geo_restriction` are:
-
- * `locations` (Optional) - The [ISO 3166-1-alpha-2 codes][4] for which you
- want CloudFront either to distribute your content (`whitelist`) or not
- distribute your content (`blacklist`).
-
- * `restriction_type` (Required) - The method that you want to use to restrict
- distribution of your content by country: `none`, `whitelist`, or
- `blacklist`.
-
-#### Viewer Certificate Arguments
-
- * `acm_certificate_arn` - The ARN of the [AWS Certificate Manager][6]
- certificate that you wish to use with this distribution. Specify this,
- `cloudfront_default_certificate`, or `iam_certificate_id`. The ACM
- certificate must be in US-EAST-1.
-
- * `cloudfront_default_certificate` - `true` if you want viewers to use HTTPS
- to request your objects and you're using the CloudFront domain name for your
- distribution. Specify this, `acm_certificate_arn`, or `iam_certificate_id`.
-
- * `iam_certificate_id` - The IAM certificate identifier of the custom viewer
- certificate for this distribution if you are using a custom domain. Specify
- this, `acm_certificate_arn`, or `cloudfront_default_certificate`.
-
- * `minimum_protocol_version` - The minimum version of the SSL protocol that
- you want CloudFront to use for HTTPS connections. One of `SSLv3` or `TLSv1`.
- Default: `SSLv3`. **NOTE**: If you are using a custom certificate (specified
- with `acm_certificate_arn` or `iam_certificate_id`), and have specified
- `sni-only` in `ssl_support_method`, `TLSv1` must be specified.
-
- * `ssl_support_method`: Specifies how you want CloudFront to serve HTTPS
- requests. One of `vip` or `sni-only`. Required if you specify
- `acm_certificate_arn` or `iam_certificate_id`. **NOTE:** `vip` causes
- CloudFront to use a dedicated IP address and may incur extra charges.
-
-## Attribute Reference
-
-The following attributes are exported:
-
- * `id` - The identifier for the distribution. For example: `EDFDVBD632BHDS5`.
-
- * `arn` - The ARN (Amazon Resource Name) for the distribution. For example: arn:aws:cloudfront::123456789012:distribution/EDFDVBD632BHDS5, where 123456789012 is your AWS account ID.
-
- * `caller_reference` - Internal value used by CloudFront to allow future
- updates to the distribution configuration.
-
- * `status` - The current status of the distribution. `Deployed` if the
- distribution's information is fully propagated throughout the Amazon
- CloudFront system.
-
- * `active_trusted_signers` - The key pair IDs that CloudFront is aware of for
- each trusted signer, if the distribution is set up to serve private content
- with signed URLs.
-
- * `domain_name` - The domain name corresponding to the distribution. For
- example: `d604721fxaaqy9.cloudfront.net`.
-
- * `last_modified_time` - The date and time the distribution was last modified.
-
- * `in_progress_validation_batches` - The number of invalidation batches
- currently in progress.
-
- * `etag` - The current version of the distribution's information. For example:
- `E2QWRUHAPOMQZL`.
-
- * `hosted_zone_id` - The CloudFront Route 53 zone ID that can be used to
- route an [Alias Resource Record Set][7] to. This attribute is simply an
- alias for the zone ID `Z2FDTNDATAQYW2`.
-
-
-[1]: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Introduction.html
-[2]: http://docs.aws.amazon.com/AmazonCloudFront/latest/APIReference/CreateDistribution.html
-[3]: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html
-[4]: http://www.iso.org/iso/country_codes/iso_3166_code_lists/country_names_and_code_elements.htm
-[5]: /docs/providers/aws/r/cloudfront_origin_access_identity.html
-[6]: https://aws.amazon.com/certificate-manager/
-[7]: http://docs.aws.amazon.com/Route53/latest/APIReference/CreateAliasRRSAPI.html
-
-
-## Import
-
-Cloudfront Distributions can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_cloudfront_distribution.distribution E74FTE3EXAMPLE
-```
diff --git a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown b/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown
deleted file mode 100644
index b35ebc661..000000000
--- a/website/source/docs/providers/aws/r/cloudfront_origin_access_identity.html.markdown
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: cloudfront_origin_access_identity"
-sidebar_current: "docs-aws-resource-cloudfront-origin-access-identity"
-description: |-
- Provides a CloudFront origin access identity.
----
-
-# aws\_cloudfront\_origin\_access\_identity
-
-Creates an Amazon CloudFront origin access identity.
-
-For information about CloudFront distributions, see the
-[Amazon CloudFront Developer Guide][1]. For more information on generating
-origin access identities, see
-[Using an Origin Access Identity to Restrict Access to Your Amazon S3 Content][2].
-
-## Example Usage
-
-The following example below creates a CloudFront origin access identity.
-
-```hcl
-resource "aws_cloudfront_origin_access_identity" "origin_access_identity" {
- comment = "Some comment"
-}
-```
-
-## Argument Reference
-
-* `comment` (Optional) - An optional comment for the origin access identity.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - The identifier for the distribution. For example: `EDFDVBD632BHDS5`.
-* `caller_reference` - Internal value used by CloudFront to allow future
- updates to the origin access identity.
-* `cloudfront_access_identity_path` - A shortcut to the full path for the
- origin access identity to use in CloudFront, see below.
-* `etag` - The current version of the origin access identity's information.
- For example: `E2QWRUHAPOMQZL`.
-* `iam_arn` - A pre-generated ARN for use in S3 bucket policies (see below).
- Example: `arn:aws:iam::cloudfront:user/CloudFront Origin Access Identity
- E2QWRUHAPOMQZL`.
-* `s3_canonical_user_id` - The Amazon S3 canonical user ID for the origin
- access identity, which you use when giving the origin access identity read
- permission to an object in Amazon S3.
-
-## Using With CloudFront
-
-Normally, when referencing an origin access identity in CloudFront, you need to
-prefix the ID with the `origin-access-identity/cloudfront/` special path.
-The `cloudfront_access_identity_path` allows this to be circumvented.
-The below snippet demonstrates use with the `s3_origin_config` structure for the
-[`aws_cloudfront_web_distribution`][3] resource:
-
-```hcl
-s3_origin_config {
- origin_access_identity = "${aws_cloudfront_origin_access_identity.origin_access_identity.cloudfront_access_identity_path}"
-}
-```
-
-### Updating your bucket policy
-
-Note that the AWS API may translate the `s3_canonical_user_id` `CanonicalUser`
-principal into an `AWS` IAM ARN principal when supplied in an
-[`aws_s3_bucket`][4] bucket policy, causing spurious diffs in Terraform. If
-you see this behaviour, use the `iam_arn` instead:
-
-```hcl
-data "aws_iam_policy_document" "s3_policy" {
- statement {
- actions = ["s3:GetObject"]
- resources = ["${module.names.s3_endpoint_arn_base}/*"]
-
- principals {
- type = "AWS"
- identifiers = ["${aws_cloudfront_origin_access_identity.origin_access_identity.iam_arn}"]
- }
- }
-
- statement {
- actions = ["s3:ListBucket"]
- resources = ["${module.names.s3_endpoint_arn_base}"]
-
- principals {
- type = "AWS"
- identifiers = ["${aws_cloudfront_origin_access_identity.origin_access_identity.iam_arn}"]
- }
- }
-}
-
-resource "aws_s3_bucket" "bucket" {
- # ...
- policy = "${data.aws_iam_policy_document.s3_policy.json}"
-}
-```
-
-[1]: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/Introduction.html
-[2]: http://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/private-content-restricting-access-to-s3.html
-[3]: /docs/providers/aws/r/cloudfront_distribution.html
-[4]: /docs/providers/aws/r/s3_bucket.html
-
-
-## Import
-
-Cloudfront Origin Access Identities can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_cloudfront_origin_access_identity.origin_access E74FTE3AEXAMPLE
-```
diff --git a/website/source/docs/providers/aws/r/cloudtrail.html.markdown b/website/source/docs/providers/aws/r/cloudtrail.html.markdown
deleted file mode 100644
index f03cb5b38..000000000
--- a/website/source/docs/providers/aws/r/cloudtrail.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: cloudtrail"
-sidebar_current: "docs-aws-resource-cloudtrail"
-description: |-
- Provides a CloudTrail resource.
----
-
-# aws\_cloudtrail
-
-Provides a CloudTrail resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudtrail" "foobar" {
- name = "tf-trail-foobar"
- s3_bucket_name = "${aws_s3_bucket.foo.id}"
- s3_key_prefix = "prefix"
- include_global_service_events = false
-}
-
-resource "aws_s3_bucket" "foo" {
- bucket = "tf-test-trail"
- force_destroy = true
-
- policy = < **Note:** `input` and `input_path` are mutually exclusive options.
-
--> **Note:** In order to be able to have your AWS Lambda function or
- SNS topic invoked by a CloudWatch Events rule, you must setup the right permissions
- using [`aws_lambda_permission`](https://www.terraform.io/docs/providers/aws/r/lambda_permission.html)
- or [`aws_sns_topic.policy`](https://www.terraform.io/docs/providers/aws/r/sns_topic.html#policy).
- More info [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/EventsResourceBasedPermissions.html).
-
-The following arguments are supported:
-
-* `rule` - (Required) The name of the rule you want to add targets to.
-* `target_id` - (Optional) The unique target assignment ID. If missing, will generate a random, unique id.
-* `arn` - (Required) The Amazon Resource Name (ARN) associated of the target.
-* `input` - (Optional) Valid JSON text passed to the target.
-* `input_path` - (Optional) The value of the [JSONPath](http://goessner.net/articles/JsonPath/)
- that is used for extracting part of the matched event when passing it to the target.
-* `role_arn` - (Optional) The Amazon Resource Name (ARN) of the IAM role to be used for this target when the rule is triggered.
-* `run_command_targets` - (Optional) Parameters used when you are using the rule to invoke Amazon EC2 Run Command. Documented below. A maximum of 5 are allowed.
-
-`run_command_parameters` support the following:
-
-* `key` - (Required) Can be either `tag:tag-key` or `InstanceIds`.
-* `values` - (Required) If Key is `tag:tag-key`, Values is a list of tag values. If Key is `InstanceIds`, Values is a list of Amazon EC2 instance IDs.
-
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown
deleted file mode 100644
index b00ac1dbf..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_destination.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_destination"
-sidebar_current: "docs-aws-resource-cloudwatch-log-destination"
-description: |-
- Provides a CloudWatch Logs destination.
----
-
-# aws\_cloudwatch\_log\_destination
-
-Provides a CloudWatch Logs destination resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_destination" "test_destination" {
- name = "test_destination"
- role_arn = "${aws_iam_role.iam_for_cloudwatch.arn}"
- target_arn = "${aws_kinesis_stream.kinesis_for_cloudwatch.arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A name for the log destination
-* `role_arn` - (Required) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to put data into the target
-* `target_arn` - (Required) The ARN of the target Amazon Kinesis stream or Amazon Lambda resource for the destination
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) specifying the log destination.
-
-## Import
-
-CloudWatch Logs destinations can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_cloudwatch_log_destination.test_destination test_destination
-```
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown
deleted file mode 100644
index 46172d613..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_destination_policy.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_destination_policy"
-sidebar_current: "docs-aws-resource-cloudwatch-log-destination-policy"
-description: |-
- Provides a CloudWatch Logs destination policy.
----
-
-# aws\_cloudwatch\_log\_destination\_policy
-
-Provides a CloudWatch Logs destination policy resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_destination" "test_destination" {
- name = "test_destination"
- role_arn = "${aws_iam_role.iam_for_cloudwatch.arn}"
- target_arn = "${aws_kinesis_stream.kinesis_for_cloudwatch.arn}"
-}
-
-data "aws_iam_policy_document" "test_destination_policy" {
- statement {
- effect = "Allow"
-
- principals = {
- type = "AWS"
-
- identifiers = [
- "123456789012",
- ]
- }
-
- actions = [
- "logs:PutSubscriptionFilter",
- ]
-
- resources = [
- "${aws_cloudwatch_log_destination.test_destination.arn}",
- ]
- }
-}
-
-resource "aws_cloudwatch_log_destination_policy" "test_destination_policy" {
- destination_name = "${aws_cloudwatch_log_destination.test_destination.name}"
- access_policy = "${data.aws_iam_policy_document.test_destination_policy.json}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `destination_name` - (Required) A name for the subscription filter
-* `access_policy` - (Required) The policy document. This is a JSON formatted string.
-
-## Import
-
-CloudWatch Logs destination policies can be imported using the `destination_name`, e.g.
-
-```
-$ terraform import aws_cloudwatch_log_destination_policy.test_destination_policy test_destination
-```
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown
deleted file mode 100644
index 88e7e0c62..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_group.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_group"
-sidebar_current: "docs-aws-resource-cloudwatch-log-group"
-description: |-
- Provides a CloudWatch Log Group resource.
----
-
-# aws\_cloudwatch\_log\_group
-
-Provides a CloudWatch Log Group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_group" "yada" {
- name = "Yada"
-
- tags {
- Environment = "production"
- Application = "serviceA"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the log group. If omitted, Terraform will assign a random, unique name.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `retention_in_days` - (Optional) Specifies the number of days
- you want to retain log events in the specified log group.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) specifying the log group.
-
-
-## Import
-
-Cloudwatch Log Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_cloudwatch_log_group.test_group yada
-```
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown
deleted file mode 100644
index 492b111c0..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_metric_filter.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_metric_filter"
-sidebar_current: "docs-aws-resource-cloudwatch-log-metric-filter"
-description: |-
- Provides a CloudWatch Log Metric Filter resource.
----
-
-# aws\_cloudwatch\_log\_metric\_filter
-
-Provides a CloudWatch Log Metric Filter resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_metric_filter" "yada" {
- name = "MyAppAccessCount"
- pattern = ""
- log_group_name = "${aws_cloudwatch_log_group.dada.name}"
-
- metric_transformation {
- name = "EventCount"
- namespace = "YourNamespace"
- value = "1"
- }
-}
-
-resource "aws_cloudwatch_log_group" "dada" {
- name = "MyApp/access.log"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A name for the metric filter.
-* `pattern` - (Required) A valid [CloudWatch Logs filter pattern](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/FilterAndPatternSyntax.html)
- for extracting metric data out of ingested log events.
-* `log_group_name` - (Required) The name of the log group to associate the metric filter with.
-* `metric_transformation` - (Required) A block defining collection of information
- needed to define how metric data gets emitted. See below.
-
-The `metric_transformation` block supports the following arguments:
-
-* `name` - (Required) The name of the CloudWatch metric to which the monitored log information should be published (e.g. `ErrorCount`)
-* `namespace` - (Required) The destination namespace of the CloudWatch metric.
-* `value` - (Required) What to publish to the metric. For example, if you're counting the occurrences of a particular term like "Error", the value will be "1" for each occurrence. If you're counting the bytes transferred the published value will be the value in the log event.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The name of the metric filter.
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown
deleted file mode 100644
index 0591814a4..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_stream.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_stream"
-sidebar_current: "docs-aws-resource-cloudwatch-log-stream"
-description: |-
- Provides a CloudWatch Log Stream resource.
----
-
-# aws\_cloudwatch\_log\_stream
-
-Provides a CloudWatch Log Stream resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_group" "yada" {
- name = "Yada"
-}
-
-resource "aws_cloudwatch_log_stream" "foo" {
- name = "SampleLogStream1234"
- log_group_name = "${aws_cloudwatch_log_group.yada.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the log stream. Must not be longer than 512 characters and must not contain `:`
-* `log_group_name` - (Required) The name of the log group under which the log stream is to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) specifying the log stream.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown
deleted file mode 100644
index 4cbc38ea7..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_log_subscription_filter.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cloudwatch_log_subscription_filter"
-sidebar_current: "docs-aws-resource-cloudwatch-log-subscription-filter"
-description: |-
- Provides a CloudWatch Logs subscription filter.
----
-
-# aws\_cloudwatch\_log\_subscription\_filter
-
-Provides a CloudWatch Logs subscription filter resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_log_subscription_filter" "test_lambdafunction_logfilter" {
- name = "test_lambdafunction_logfilter"
- role_arn = "${aws_iam_role.iam_for_lambda.arn}"
- log_group_name = "/aws/lambda/example_lambda_name"
- filter_pattern = "logtype test"
- destination_arn = "${aws_kinesis_stream.test_logstream.arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A name for the subscription filter
-* `destination_arn` - (Required) The ARN of the destination to deliver matching log events to. Kinesis stream or Lambda function ARN.
-* `filter_pattern` - (Required) A valid CloudWatch Logs filter pattern for subscribing to a filtered stream of log events.
-* `log_group_name` - (Required) The name of the log group to associate the subscription filter with
-* `role_arn` - (Optional) The ARN of an IAM role that grants Amazon CloudWatch Logs permissions to deliver ingested log events to the destination. If you use Lambda as a destination, you should skip this argument and use `aws_lambda_permission` resource for granting access from CloudWatch logs to the destination Lambda function.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) specifying the log subscription filter.
diff --git a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown b/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown
deleted file mode 100644
index 1333f46f3..000000000
--- a/website/source/docs/providers/aws/r/cloudwatch_metric_alarm.html.markdown
+++ /dev/null
@@ -1,109 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: cloudwatch_metric_alarm"
-sidebar_current: "docs-aws-resource-cloudwatch-metric-alarm"
-description: |-
- Provides a CloudWatch Metric Alarm resource.
----
-
-# aws\_cloudwatch\_metric\_alarm
-
-Provides a CloudWatch Metric Alarm resource.
-
-## Example Usage
-
-```hcl
-resource "aws_cloudwatch_metric_alarm" "foobar" {
- alarm_name = "terraform-test-foobar5"
- comparison_operator = "GreaterThanOrEqualToThreshold"
- evaluation_periods = "2"
- metric_name = "CPUUtilization"
- namespace = "AWS/EC2"
- period = "120"
- statistic = "Average"
- threshold = "80"
- alarm_description = "This metric monitors ec2 cpu utilization"
- insufficient_data_actions = []
-}
-```
-
-## Example in Conjunction with Scaling Policies
-
-```hcl
-resource "aws_autoscaling_policy" "bat" {
- name = "foobar3-terraform-test"
- scaling_adjustment = 4
- adjustment_type = "ChangeInCapacity"
- cooldown = 300
- autoscaling_group_name = "${aws_autoscaling_group.bar.name}"
-}
-
-resource "aws_cloudwatch_metric_alarm" "bat" {
- alarm_name = "terraform-test-foobar5"
- comparison_operator = "GreaterThanOrEqualToThreshold"
- evaluation_periods = "2"
- metric_name = "CPUUtilization"
- namespace = "AWS/EC2"
- period = "120"
- statistic = "Average"
- threshold = "80"
-
- dimensions {
- AutoScalingGroupName = "${aws_autoscaling_group.bar.name}"
- }
-
- alarm_description = "This metric monitors ec2 cpu utilization"
- alarm_actions = ["${aws_autoscaling_policy.bat.arn}"]
-}
-```
-
-~> **NOTE:** You cannot create a metric alarm consisting of both `statistic` and `extended_statistic` parameters.
-You must choose one or the other
-
-## Argument Reference
-
-See [related part of AWS Docs](https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_PutMetricAlarm.html)
-for details about valid values.
-
-The following arguments are supported:
-
-* `alarm_name` - (Required) The descriptive name for the alarm. This name must be unique within the user's AWS account
-* `comparison_operator` - (Required) The arithmetic operation to use when comparing the specified Statistic and Threshold. The specified Statistic value is used as the first operand. Any of the following is supported: `GreaterThanOrEqualToThreshold`, `GreaterThanThreshold`, `LessThanThreshold`, `LessThanOrEqualToThreshold`.
-* `evaluation_periods` - (Required) The number of periods over which data is compared to the specified threshold.
-* `metric_name` - (Required) The name for the alarm's associated metric.
- See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
-* `namespace` - (Required) The namespace for the alarm's associated metric. See docs for the [list of namespaces](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/aws-namespaces.html).
- See docs for [supported metrics](https://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
-* `period` - (Required) The period in seconds over which the specified `statistic` is applied.
-* `statistic` - (Optional) The statistic to apply to the alarm's associated metric.
- Any of the following is supported: `SampleCount`, `Average`, `Sum`, `Minimum`, `Maximum`
-* `threshold` - (Required) The value against which the specified statistic is compared.
-* `actions_enabled` - (Optional) Indicates whether or not actions should be executed during any changes to the alarm's state. Defaults to `true`.
-* `alarm_actions` - (Optional) The list of actions to execute when this alarm transitions into an ALARM state from any other state. Each action is specified as an Amazon Resource Number (ARN).
-* `alarm_description` - (Optional) The description for the alarm.
-* `dimensions` - (Optional) The dimensions for the alarm's associated metric. For the list of available dimensions see the AWS documentation [here](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/CW_Support_For_AWS.html).
-* `insufficient_data_actions` - (Optional) The list of actions to execute when this alarm transitions into an INSUFFICIENT_DATA state from any other state. Each action is specified as an Amazon Resource Number (ARN).
-* `ok_actions` - (Optional) The list of actions to execute when this alarm transitions into an OK state from any other state. Each action is specified as an Amazon Resource Number (ARN).
-* `unit` - (Optional) The unit for the alarm's associated metric.
-* `extended_statistic` - (Optional) The percentile statistic for the metric associated with the alarm. Specify a value between p0.0 and p100.
-* `treat_missing_data` - (Optional) Sets how this alarm is to handle missing data points. The following values are supported: `missing`, `ignore`, `breaching` and `notBreaching`. Defaults to `missing`.
-* `evaluate_low_sample_count_percentiles` - (Optional) Used only for alarms
-based on percentiles. If you specify `ignore`, the alarm state will not
-change during periods with too few data points to be statistically significant.
-If you specify `evaluate` or omit this parameter, the alarm will always be
-evaluated and possibly change state no matter how many data points are available.
-The following values are supported: `ignore`, and `evaluate`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the CloudWatch metric alarm
-
-## Import
-
-Cloud Metric Alarms can be imported using the `alarm_name`, e.g.
-
-```
-$ terraform import aws_cloudwatch_metric_alarm.test alarm-12345
-```
diff --git a/website/source/docs/providers/aws/r/code_commit_repository.html.markdown b/website/source/docs/providers/aws/r/code_commit_repository.html.markdown
deleted file mode 100644
index b6e88968e..000000000
--- a/website/source/docs/providers/aws/r/code_commit_repository.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_codecommit_repository"
-sidebar_current: "docs-aws-resource-codecommit-repository"
-description: |-
- Provides a CodeCommit Repository Resource.
----
-
-# aws\_codecommit\_repository
-
-Provides a CodeCommit Repository Resource.
-
-~> **NOTE on CodeCommit Availability**: The CodeCommit is not yet rolled out
-in all regions - available regions are listed in
-[the AWS Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#codecommit_region).
-
-## Example Usage
-
-```hcl
-resource "aws_codecommit_repository" "test" {
- repository_name = "MyTestRepository"
- description = "This is the Sample App Repository"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `repository_name` - (Required) The name for the repository. This needs to be less than 100 characters.
-* `description` - (Optional) The description of the repository. This needs to be less than 1000 characters
-* `default_branch` - (Optional) The default branch of the repository. The branch specified here needs to exist.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `repository_id` - The ID of the repository
-* `arn` - The ARN of the repository
-* `clone_url_http` - The URL to use for cloning the repository over HTTPS.
-* `clone_url_ssh` - The URL to use for cloning the repository over SSH.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown b/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown
deleted file mode 100644
index 82dd8dc03..000000000
--- a/website/source/docs/providers/aws/r/code_commit_trigger.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_codecommit_trigger"
-sidebar_current: "docs-aws-resource-codecommit-trigger"
-description: |-
- Provides a CodeCommit Trigger Resource.
----
-
-# aws\_codecommit\_trigger
-
-Provides a CodeCommit Trigger Resource.
-
-~> **NOTE on CodeCommit**: The CodeCommit is not yet rolled out
-in all regions - available regions are listed in
-[the AWS Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#codecommit_region).
-
-## Example Usage
-
-```hcl
-resource "aws_codecommit_trigger" "test" {
- depends_on = ["aws_codecommit_repository.test"]
- repository_name = "my_test_repository"
-
- trigger {
- name = "noname"
- events = ["all"]
- destination_arn = "${aws_sns_topic.test.arn}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `repository_name` - (Required) The name for the repository. This needs to be less than 100 characters.
-* `name` - (Required) The name of the trigger.
-* `destination_arn` - (Required) The ARN of the resource that is the target for a trigger. For example, the ARN of a topic in Amazon Simple Notification Service (SNS).
-* `custom_data` - (Optional) Any custom data associated with the trigger that will be included in the information sent to the target of the trigger.
-* `branches` - (Optional) The branches that will be included in the trigger configuration. If no branches are specified, the trigger will apply to all branches.
-* `events` - (Required) The repository events that will cause the trigger to run actions in another service, such as sending a notification through Amazon Simple Notification Service (SNS). If no events are specified, the trigger will run for all repository events. Event types include: `all`, `updateReference`, `createReference`, `deleteReference`.
diff --git a/website/source/docs/providers/aws/r/codebuild_project.html.markdown b/website/source/docs/providers/aws/r/codebuild_project.html.markdown
deleted file mode 100644
index b5e18341f..000000000
--- a/website/source/docs/providers/aws/r/codebuild_project.html.markdown
+++ /dev/null
@@ -1,158 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_codebuild_project"
-sidebar_current: "docs-aws-resource-codebuild-project"
-description: |-
- Provides a CodeBuild Project resource.
----
-
-# aws\_codebuild\_project
-
-Provides a CodeBuild Project resource.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_role" "codebuild_role" {
- name = "codebuild-role-"
-
- assume_role_policy = < **NOTE on `aws_codepipeline`:** - the `GITHUB_TOKEN` environment variable must be set if the GitHub provider is specified.
-
-## Example Usage
-
-```hcl
-resource "aws_s3_bucket" "foo" {
- bucket = "test-bucket"
- acl = "private"
-}
-
-resource "aws_iam_role" "foo" {
- name = "test-role"
-
- assume_role_policy = < **Note:** The input artifact of an action must exactly match the output artifact declared in a preceding action, but the input artifact does not have to be the next action in strict sequence from the action that provided the output artifact. Actions in parallel can declare different output artifacts, which are in turn consumed by different following actions.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The codepipeline ID.
-
-## Import
-
-CodePipelines can be imported using the name, e.g.
-
-```
-$ terraform import aws_codepipeline.foo example
-```
diff --git a/website/source/docs/providers/aws/r/cognito_identity_pool.markdown b/website/source/docs/providers/aws/r/cognito_identity_pool.markdown
deleted file mode 100644
index 5dfe696b6..000000000
--- a/website/source/docs/providers/aws/r/cognito_identity_pool.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_cognito_identity_pool"
-sidebar_current: "docs-aws-resource-cognito-identity-pool"
-description: |-
- Provides an AWS Cognito Identity Pool.
----
-
-# aws\_cognito\_identity\_pool
-
-Provides an AWS Cognito Identity Pool.
-
-## Example Usage
-
-```
-resource "aws_iam_saml_provider" "default" {
- name = "my-saml-provider"
- saml_metadata_document = "${file("saml-metadata.xml")}"
-}
-
-resource "aws_cognito_identity_pool" "main" {
- identity_pool_name = "identity pool"
- allow_unauthenticated_identities = false
-
- cognito_identity_providers {
- client_id = "6lhlkkfbfb4q5kpp90urffae"
- provider_name = "cognito-idp.us-east-1.amazonaws.com/us-east-1_Tv0493apJ"
- server_side_token_check = false
- }
-
- cognito_identity_providers {
- client_id = "7kodkvfqfb4qfkp39eurffae"
- provider_name = "cognito-idp.us-east-1.amazonaws.com/eu-west-1_Zr231apJu"
- server_side_token_check = false
- }
-
- supported_login_providers {
- "graph.facebook.com" = "7346241598935552"
- "accounts.google.com" = "123456789012.apps.googleusercontent.com"
- }
-
- saml_provider_arns = ["${aws_iam_saml_provider.default.arn}"]
- openid_connect_provider_arns = ["arn:aws:iam::123456789012:oidc-provider/foo.example.com"]
-}
-```
-
-## Argument Reference
-
-The Cognito Identity Pool argument layout is a structure composed of several sub-resources - these resources are laid out below.
-
-* `identity_pool_name` (Required) - The Cognito Identity Pool name.
-* `allow_unauthenticated_identities` (Required) - Whether the identity pool supports unauthenticated logins or not.
-* `developer_provider_name` (Optional) - The "domain" by which Cognito will refer to your users. This name acts as a placeholder that allows your
-backend and the Cognito service to communicate about the developer provider.
-* `cognito_identity_providers` (Optional) - An array of [Amazon Cognito Identity user pools](#cognito-identity-providers) and their client IDs.
-* `openid_connect_provider_arns` (Optional) - A list of OpenID Connect provider ARNs.
-* `saml_provider_arns` (Optional) - An array of Amazon Resource Names (ARNs) of the SAML provider for your identity.
-* `supported_login_providers` (Optional) - Key-Value pairs mapping provider names to provider app IDs.
-
-#### Cognito Identity Providers
-
- * `client_id` (Optional) - The client ID for the Amazon Cognito Identity User Pool.
- * `provider_name` (Optional) - The provider name for an Amazon Cognito Identity User Pool.
- * `server_side_token_check` (Optional) - Whether server-side token validation is enabled for the identity provider’s token or not.
-
-## Attributes Reference
-
-In addition to the arguments, which are exported, the following attributes are exported:
-
-* `id` - An identity pool ID in the format REGION:GUID.
-
-## Import
-
-Cognito Identity Pool can be imported using the name, e.g.
-
-```
-$ terraform import aws_cognito_identity_pool.mypool us-west-2:1a234b56-7890-12c3-45d6-789012e34f56
-```
diff --git a/website/source/docs/providers/aws/r/config_config_rule.html.markdown b/website/source/docs/providers/aws/r/config_config_rule.html.markdown
deleted file mode 100644
index fe773c817..000000000
--- a/website/source/docs/providers/aws/r/config_config_rule.html.markdown
+++ /dev/null
@@ -1,139 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_config_config_rule"
-sidebar_current: "docs-aws-resource-config-config-rule"
-description: |-
- Provides an AWS Config Rule.
----
-
-# aws\_config\_config\_rule
-
-Provides an AWS Config Rule.
-
-~> **Note:** Config Rule requires an existing [Configuration Recorder](/docs/providers/aws/r/config_configuration_recorder.html) to be present. Use of `depends_on` is recommended (as shown below) to avoid race conditions.
-
-## Example Usage
-
-```hcl
-resource "aws_config_config_rule" "r" {
- name = "example"
-
- source {
- owner = "AWS"
- source_identifier = "S3_BUCKET_VERSIONING_ENABLED"
- }
-
- depends_on = ["aws_config_configuration_recorder.foo"]
-}
-
-resource "aws_config_configuration_recorder" "foo" {
- name = "example"
- role_arn = "${aws_iam_role.r.arn}"
-}
-
-resource "aws_iam_role" "r" {
- name = "my-awsconfig-role"
-
- assume_role_policy = < **Note:** _Starting_ the Configuration Recorder requires a [delivery channel](/docs/providers/aws/r/config_delivery_channel.html) (while delivery channel creation requires Configuration Recorder). This is why [`aws_config_configuration_recorder_status`](/docs/providers/aws/r/config_configuration_recorder_status.html) is a separate resource.
-
-## Example Usage
-
-```hcl
-resource "aws_config_configuration_recorder" "foo" {
- name = "example"
- role_arn = "${aws_iam_role.r.arn}"
-}
-
-resource "aws_iam_role" "r" {
- name = "awsconfig-example"
-
- assume_role_policy = < **Note:** Starting Configuration Recorder requires a [Delivery Channel](/docs/providers/aws/r/config_delivery_channel.html) to be present. Use of `depends_on` (as shown below) is recommended to avoid race conditions.
-
-## Example Usage
-
-```hcl
-resource "aws_config_configuration_recorder_status" "foo" {
- name = "${aws_config_configuration_recorder.foo.name}"
- is_enabled = true
- depends_on = ["aws_config_delivery_channel.foo"]
-}
-
-resource "aws_iam_role_policy_attachment" "a" {
- role = "${aws_iam_role.r.name}"
- policy_arn = "arn:aws:iam::aws:policy/service-role/AWSConfigRole"
-}
-
-resource "aws_s3_bucket" "b" {
- bucket = "awsconfig-example"
-}
-
-resource "aws_config_delivery_channel" "foo" {
- name = "example"
- s3_bucket_name = "${aws_s3_bucket.b.bucket}"
-}
-
-resource "aws_config_configuration_recorder" "foo" {
- name = "example"
- role_arn = "${aws_iam_role.r.arn}"
-}
-
-resource "aws_iam_role" "r" {
- name = "example-awsconfig"
-
- assume_role_policy = < **Note:** Delivery Channel requires a [Configuration Recorder](/docs/providers/aws/r/config_configuration_recorder.html) to be present. Use of `depends_on` (as shown below) is recommended to avoid race conditions.
-
-## Example Usage
-
-```hcl
-resource "aws_config_delivery_channel" "foo" {
- name = "example"
- s3_bucket_name = "${aws_s3_bucket.b.bucket}"
- depends_on = ["aws_config_configuration_recorder.foo"]
-}
-
-resource "aws_s3_bucket" "b" {
- bucket = "example-awsconfig"
- force_destroy = true
-}
-
-resource "aws_config_configuration_recorder" "foo" {
- name = "example"
- role_arn = "${aws_iam_role.r.arn}"
-}
-
-resource "aws_iam_role" "r" {
- name = "awsconfig-example"
-
- assume_role_policy = < **Note:** using `apply_immediately` can result in a
-brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][2]
-for more information.
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_db_instance" "default" {
- allocated_storage = 10
- storage_type = "gp2"
- engine = "mysql"
- engine_version = "5.6.17"
- instance_class = "db.t1.micro"
- name = "mydb"
- username = "foo"
- password = "bar"
- db_subnet_group_name = "my_database_subnet_group"
- parameter_group_name = "default.mysql5.6"
-}
-```
-
-## Argument Reference
-
-For more detailed documentation about each argument, refer to
-the [AWS official documentation](http://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html).
-
-The following arguments are supported:
-
-* `allocated_storage` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The allocated storage in gigabytes.
-* `engine` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) The database engine to use.
-* `engine_version` - (Optional) The engine version to use.
-* `identifier` - (Optional, Forces new resource) The name of the RDS instance, if omitted, Terraform will assign a random, unique identifier.
-* `identifier_prefix` - (Optional, Forces new resource) Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
-* `instance_class` - (Required) The instance type of the RDS instance.
-* `storage_type` - (Optional) One of "standard" (magnetic), "gp2" (general
- purpose SSD), or "io1" (provisioned IOPS SSD). The default is "io1" if
- `iops` is specified, "standard" if not. Note that this behaviour is different from the AWS web console, where the default is "gp2".
-* `final_snapshot_identifier` - (Optional) The name of your final DB snapshot
- when this DB instance is deleted. If omitted, no final snapshot will be
- made.
-* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB instance is deleted. If true is specified, no DBSnapshot is created. If false is specified, a DB snapshot is created before the DB instance is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
-* `copy_tags_to_snapshot` – (Optional, boolean) On delete, copy all Instance `tags` to
-the final snapshot (if `final_snapshot_identifier` is specified). Default
-`false`
-* `name` - (Optional) The DB name to create. If omitted, no database is created
- initially.
-* `password` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) Password for the master DB user. Note that this may
- show up in logs, and it will be stored in the state file.
-* `username` - (Required unless a `snapshot_identifier` or `replicate_source_db` is provided) Username for the master DB user.
-* `availability_zone` - (Optional) The AZ for the RDS instance.
-* `backup_retention_period` - (Optional) The days to retain backups for. Must be
-`1` or greater to be a source for a [Read Replica][1].
-* `backup_window` - (Optional) The daily time range (in UTC) during which automated backups are created if they are enabled. Example: "09:46-10:16". Must not overlap with `maintenance_window`.
-* `iops` - (Optional) The amount of provisioned IOPS. Setting this implies a
- storage_type of "io1".
-* `maintenance_window` - (Optional) The window to perform maintenance in.
- Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
- See [RDS Maintenance Window docs](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html#AdjustingTheMaintenanceWindow) for more.
-* `multi_az` - (Optional) Specifies if the RDS instance is multi-AZ
-* `port` - (Optional) The port on which the DB accepts connections.
-* `publicly_accessible` - (Optional) Bool to control if instance is publicly accessible. Defaults to `false`.
-* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate.
-* `security_group_names` - (Optional/Deprecated) List of DB Security Groups to associate.
- Only used for [DB Instances on the _EC2-Classic_ Platform](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_VPC.html#USER_VPC.FindDefaultVPC).
-* `db_subnet_group_name` - (Optional) Name of DB subnet group. DB instance will be created in the VPC associated with the DB subnet group. If unspecified, will be created in the `default` VPC, or in EC2 Classic, if available.
-* `parameter_group_name` - (Optional) Name of the DB parameter group to associate.
-* `option_group_name` - (Optional) Name of the DB option group to associate.
-* `storage_encrypted` - (Optional) Specifies whether the DB instance is encrypted. The default is `false` if not specified.
-* `apply_immediately` - (Optional) Specifies whether any database modifications
- are applied immediately, or during the next maintenance window. Default is
- `false`. See [Amazon RDS Documentation for more information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html)
-* `replicate_source_db` - (Optional) Specifies that this resource is a Replicate
-database, and to use this value as the source database. This correlates to the
-`identifier` of another Amazon RDS Database to replicate. See
-[DB Instance Replication][1] and
-[Working with PostgreSQL and MySQL Read Replicas](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) for
- more information on using Replication.
-* `snapshot_identifier` - (Optional) Specifies whether or not to create this database from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05.
-* `license_model` - (Optional, but required for some DB engines, i.e. Oracle SE1) License model information for this DB instance.
-* `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Defaults to true.
-* `allow_major_version_upgrade` - (Optional) Indicates that major version upgrades are allowed. Changing this parameter does not result in an outage and the change is asynchronously applied as soon as possible.
-* `monitoring_role_arn` - (Optional) The ARN for the IAM role that permits RDS to send
-enhanced monitoring metrics to CloudWatch Logs. You can find more information on the [AWS Documentation](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html)
-what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
-* `monitoring_interval` - (Optional) The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60.
-* `kms_key_id` - (Optional) The ARN for the KMS encryption key.
-* `character_set_name` - (Optional) The character set name to use for DB encoding in Oracle instances. This can't be changed.
-[Oracle Character Sets Supported in Amazon RDS](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Appendix.OracleCharacterSets.html)
-* `iam_database_authentication_enabled` - (Optional) Specifies whether or not mappings of AWS Identity and Access Management (IAM) accounts to database accounts are enabled.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `timezone` - (Optional) Time zone of the DB instance. `timezone` is currently only supported by Microsoft SQL Server.
-The `timezone` can only be set on creation. See [MSSQL User Guide](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_SQLServer.html#SQLServer.Concepts.General.TimeZone) for more information
-
-~> **NOTE:** Removing the `replicate_source_db` attribute from an existing RDS
-Replicate database managed by Terraform will promote the database to a fully
-standalone database.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The RDS instance ID.
-* `resource_id` - The RDS Resource ID of this instance.
-* `address` - The address of the RDS instance.
-* `arn` - The ARN of the RDS instance.
-* `allocated_storage` - The amount of allocated storage
-* `availability_zone` - The availability zone of the instance
-* `backup_retention_period` - The backup retention period
-* `backup_window` - The backup window
-* `endpoint` - The connection endpoint
-* `engine` - The database engine
-* `engine_version` - The database engine version
-* `instance_class`- The RDS instance class
-* `maintenance_window` - The instance maintenance window
-* `multi_az` - If the RDS instance is multi AZ enabled
-* `name` - The database name
-* `port` - The database port
-* `status` - The RDS instance status
-* `username` - The master username for the database
-* `storage_encrypted` - Specifies whether the DB instance is encrypted
-* `hosted_zone_id` - The canonical hosted zone ID of the DB instance (to be used in a Route 53 Alias record)
-
-On Oracle instances the following is exported additionally:
-
-* `character_set_name` - The character set used on Oracle instances.
-
-
-
-## Timeouts
-
-`aws_db_instance` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `create` - (Default `40 minutes`) Used for Creating Instances, Replicas, and
-restoring from Snapshots
-- `update` - (Default `80 minutes`) Used for Database modifications
-- `delete` - (Default `40 minutes`) Used for destroying databases. This includes
-the time required to take snapshots
-
-[1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html
-[2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html
-
-## Import
-
-DB Instances can be imported using the `identifier`, e.g.
-
-```
-$ terraform import aws_db_instance.default mydb-rds-instance
-```
diff --git a/website/source/docs/providers/aws/r/db_option_group.html.markdown b/website/source/docs/providers/aws/r/db_option_group.html.markdown
deleted file mode 100644
index af7dc1b96..000000000
--- a/website/source/docs/providers/aws/r/db_option_group.html.markdown
+++ /dev/null
@@ -1,82 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_db_option_group"
-sidebar_current: "docs-aws-resource-db-option-group"
----
-
-# aws\_db\_option\_group
-
-Provides an RDS DB option group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_db_option_group" "bar" {
- name = "option-group-test-terraform"
- option_group_description = "Terraform Option Group"
- engine_name = "sqlserver-ee"
- major_engine_version = "11.00"
-
- option {
- option_name = "Timezone"
-
- option_settings {
- name = "TIME_ZONE"
- value = "UTC"
- }
- }
-
- option {
- option_name = "TDE"
- }
-}
-```
-
-~> **Note**: Any modifications to the `db_option_group` are set to happen immediately as we default to applying immediately.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the option group. If omitted, Terraform will assign a random, unique name. Must be lowercase, to match as it is stored in AWS.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`. Must be lowercase, to match as it is stored in AWS.
-* `option_group_description` - (Optional) The description of the option group. Defaults to "Managed by Terraform".
-* `engine_name` - (Required) Specifies the name of the engine that this option group should be associated with.
-* `major_engine_version` - (Required) Specifies the major version of the engine that this option group should be associated with.
-* `option` - (Optional) A list of Options to apply.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Option blocks support the following:
-
-* `option_name` - (Required) The Name of the Option (e.g. MEMCACHED).
-* `option_settings` - (Optional) A list of option settings to apply.
-* `port` - (Optional) The Port number when connecting to the Option (e.g. 11211).
-* `db_security_group_memberships` - (Optional) A list of DB Security Groups for which the option is enabled.
-* `vpc_security_group_memberships` - (Optional) A list of VPC Security Groups for which the option is enabled.
-
-Option Settings blocks support the following:
-
-* `name` - (Optional) The Name of the setting.
-* `value` - (Optional) The Value of the setting.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The ARN of the db option group.
-
-
-## Timeouts
-
-`aws_db_option_group` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `delete` - (Default `15 minutes`)
-
-## Import
-
-DB Option groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_db_option_group.bar mysql-option-group
-```
diff --git a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown b/website/source/docs/providers/aws/r/db_parameter_group.html.markdown
deleted file mode 100644
index 25ea6b539..000000000
--- a/website/source/docs/providers/aws/r/db_parameter_group.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_db_parameter_group"
-sidebar_current: "docs-aws-resource-db-parameter-group"
----
-
-# aws\_db\_parameter\_group
-
-Provides an RDS DB parameter group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_db_parameter_group" "default" {
- name = "rds-pg"
- family = "mysql5.6"
-
- parameter {
- name = "character_set_server"
- value = "utf8"
- }
-
- parameter {
- name = "character_set_client"
- value = "utf8"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the DB parameter group. If omitted, Terraform will assign a random, unique name.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `family` - (Required) The family of the DB parameter group.
-* `description` - (Optional) The description of the DB parameter group. Defaults to "Managed by Terraform".
-* `parameter` - (Optional) A list of DB parameters to apply.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Parameter blocks support the following:
-
-* `name` - (Required) The name of the DB parameter.
-* `value` - (Required) The value of the DB parameter.
-* `apply_method` - (Optional) "immediate" (default), or "pending-reboot". Some
- engines can't apply some parameters without a reboot, and you will need to
- specify "pending-reboot" here.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The db parameter group name.
-* `arn` - The ARN of the db parameter group.
-
-## Import
-
-DB Parameter groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_db_parameter_group.rds_pg rds-pg
-```
diff --git a/website/source/docs/providers/aws/r/db_security_group.html.markdown b/website/source/docs/providers/aws/r/db_security_group.html.markdown
deleted file mode 100644
index 275c94690..000000000
--- a/website/source/docs/providers/aws/r/db_security_group.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_db_security_group"
-sidebar_current: "docs-aws-resource-db-security-group"
-description: |-
- Provides an RDS security group resource.
----
-
-# aws\_db\_security\_group
-
-Provides an RDS security group resource. This is only for DB instances in the
-EC2-Classic Platform. For instances inside a VPC, use the
-[`aws_db_instance.vpc_security_group_ids`](/docs/providers/aws/r/db_instance.html#vpc_security_group_ids)
-attribute instead.
-
-## Example Usage
-
-```hcl
-resource "aws_db_security_group" "default" {
- name = "rds_sg"
-
- ingress {
- cidr = "10.0.0.0/24"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DB security group.
-* `description` - (Optional) The description of the DB security group. Defaults to "Managed by Terraform".
-* `ingress` - (Required) A list of ingress rules.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Ingress blocks support the following:
-
-* `cidr` - The CIDR block to accept
-* `security_group_name` - The name of the security group to authorize
-* `security_group_id` - The ID of the security group to authorize
-* `security_group_owner_id` - The owner Id of the security group provided
- by `security_group_name`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The db security group ID.
-* `arn` - The arn of the DB security group.
-
-
-## Import
-
-DB Security groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_db_security_group.default aws_rds_sg-1
-```
diff --git a/website/source/docs/providers/aws/r/db_snapshot.html.md b/website/source/docs/providers/aws/r/db_snapshot.html.md
deleted file mode 100644
index 840f31de5..000000000
--- a/website/source/docs/providers/aws/r/db_snapshot.html.md
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_db_snapshot"
-sidebar_current: "docs-aws-resource-db-snapshot"
-description: |-
- Provides an DB Instance.
----
-
-# aws\_db\_snapshot
-
-Creates a Snapshot of an DB Instance.
-
-## Example Usage
-
-```
-resource "aws_db_instance" "bar" {
- allocated_storage = 10
- engine = "MySQL"
- engine_version = "5.6.21"
- instance_class = "db.t1.micro"
- name = "baz"
- password = "barbarbarbar"
- username = "foo"
-
- maintenance_window = "Fri:09:00-Fri:09:30"
- backup_retention_period = 0
- parameter_group_name = "default.mysql5.6"
-}
-
-resource "aws_db_snapshot" "test" {
- db_instance_identifier = "${aws_db_instance.bar.id}"
- db_snapshot_identifier = "testsnapshot1234"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `db_instance_identifier` - (Required) The DB Instance Identifier from which to take the snapshot.
-* `db_snapshot_identifier` - (Required) The Identifier for the snapshot.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `allocated_storage` - Specifies the allocated storage size in gigabytes (GB).
-* `availability_zone` - Specifies the name of the Availability Zone the DB instance was located in at the time of the DB snapshot.
-* `db_snapshot_arn` - The Amazon Resource Name (ARN) for the DB snapshot.
-* `encrypted` - Specifies whether the DB snapshot is encrypted.
-* `engine` - Specifies the name of the database engine.
-* `engine_version` - Specifies the version of the database engine.
-* `iops` - Specifies the Provisioned IOPS (I/O operations per second) value of the DB instance at the time of the snapshot.
-* `kms_key_id` - The ARN for the KMS encryption key.
-* `license_model` - License model information for the restored DB instance.
-* `option_group_name` - Provides the option group name for the DB snapshot.
-* `source_db_snapshot_identifier` - The DB snapshot Arn that the DB snapshot was copied from. It only has value in case of cross customer or cross region copy.
-* `source_region` - The region that the DB snapshot was created in or copied from.
-* `status` - Specifies the status of this DB snapshot.
-* `storage_type` - Specifies the storage type associated with DB snapshot.
-* `vpc_id` - Specifies the storage type associated with DB snapshot.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown b/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
deleted file mode 100644
index eaf27e030..000000000
--- a/website/source/docs/providers/aws/r/db_subnet_group.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_db_subnet_group"
-sidebar_current: "docs-aws-resource-db-subnet-group"
-description: |-
- Provides an RDS DB subnet group resource.
----
-
-# aws\_db\_subnet\_group
-
-Provides an RDS DB subnet group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_db_subnet_group" "default" {
- name = "main"
- subnet_ids = ["${aws_subnet.frontend.id}", "${aws_subnet.backend.id}"]
-
- tags {
- Name = "My DB subnet group"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the DB subnet group. If omitted, Terraform will assign a random, unique name.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `description` - (Optional) The description of the DB subnet group. Defaults to "Managed by Terraform".
-* `subnet_ids` - (Required) A list of VPC subnet IDs.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The db subnet group name.
-* `arn` - The ARN of the db subnet group.
-
-
-## Import
-
-DB Subnet groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_db_subnet_group.default production-subnet-group
-```
diff --git a/website/source/docs/providers/aws/r/default_network_acl.html.markdown b/website/source/docs/providers/aws/r/default_network_acl.html.markdown
deleted file mode 100644
index cbabbd22f..000000000
--- a/website/source/docs/providers/aws/r/default_network_acl.html.markdown
+++ /dev/null
@@ -1,181 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_network_acl"
-sidebar_current: "docs-aws-resource-default-network-acl"
-description: |-
- Manage the default Network ACL resource.
----
-
-# aws\_default\_network\_acl
-
-Provides a resource to manage the default AWS Network ACL. VPC Only.
-
-Each VPC created in AWS comes with a Default Network ACL that can be managed, but not
-destroyed. **This is an advanced resource**, and has special caveats to be aware
-of when using it. Please read this document in its entirety before using this
-resource.
-
-The `aws_default_network_acl` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead attempts to "adopt" it
-into management. We can do this because each VPC created has a Default Network
-ACL that cannot be destroyed, and is created with a known set of default rules.
-
-When Terraform first adopts the Default Network ACL, it **immediately removes all
-rules in the ACL**. It then proceeds to create any rules specified in the
-configuration. This step is required so that only the rules specified in the
-configuration are created.
-
-This resource treats its inline rules as absolute; only the rules defined
-inline are created, and any additions/removals external to this resource will
-result in diffs being shown. For these reasons, this resource is incompatible with the
-`aws_network_acl_rule` resource.
-
-For more information about Network ACLs, see the AWS Documentation on
-[Network ACLs][aws-network-acls].
-
-## Basic Example Usage, with default rules
-
-The following config gives the Default Network ACL the same rules that AWS
-includes, but pulls the resource under management by Terraform. This means that
-any ACL rules added or changed will be detected as drift.
-
-```hcl
-resource "aws_vpc" "mainvpc" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_default_network_acl" "default" {
- default_network_acl_id = "${aws_vpc.mainvpc.default_network_acl_id}"
-
- ingress {
- protocol = -1
- rule_no = 100
- action = "allow"
- cidr_block = "0.0.0.0/0"
- from_port = 0
- to_port = 0
- }
-
- egress {
- protocol = -1
- rule_no = 100
- action = "allow"
- cidr_block = "0.0.0.0/0"
- from_port = 0
- to_port = 0
- }
-}
-```
-
-## Example config to deny all Egress traffic, allowing Ingress
-
-The following denies all Egress traffic by omitting any `egress` rules, while
-including the default `ingress` rule to allow all traffic.
-
-```hcl
-resource "aws_vpc" "mainvpc" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_default_network_acl" "default" {
- default_network_acl_id = "${aws_vpc.mainvpc.default_network_acl_id}"
-
- ingress {
- protocol = -1
- rule_no = 100
- action = "allow"
- cidr_block = "0.0.0.0/0"
- from_port = 0
- to_port = 0
- }
-}
-```
-
-## Example config to deny all traffic to any Subnet in the Default Network ACL:
-
-This config denies all traffic in the Default ACL. This can be useful if you
-want a locked down default to force all resources in the VPC to assign a
-non-default ACL.
-
-```hcl
-resource "aws_vpc" "mainvpc" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_default_network_acl" "default" {
- default_network_acl_id = "${aws_vpc.mainvpc.default_network_acl_id}"
-
- # no rules defined, deny all traffic in this ACL
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `default_network_acl_id` - (Required) The Network ACL ID to manage. This
-attribute is exported from `aws_vpc`, or manually found via the AWS Console.
-* `subnet_ids` - (Optional) A list of Subnet IDs to apply the ACL to. See the
-notes below on managing Subnets in the Default Network ACL
-* `ingress` - (Optional) Specifies an ingress rule. Parameters defined below.
-* `egress` - (Optional) Specifies an egress rule. Parameters defined below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Both `egress` and `ingress` support the following keys:
-
-* `from_port` - (Required) The from port to match.
-* `to_port` - (Required) The to port to match.
-* `rule_no` - (Required) The rule number. Used for ordering.
-* `action` - (Required) The action to take.
-* `protocol` - (Required) The protocol to match. If using the -1 'all'
-protocol, you must specify a from and to port of 0.
-* `cidr_block` - (Optional) The CIDR block to match. This must be a
-valid network mask.
-* `icmp_type` - (Optional) The ICMP type to be used. Default 0.
-* `icmp_code` - (Optional) The ICMP type code to be used. Default 0.
-
-~> Note: For more information on ICMP types and codes, see here: http://www.nthelp.com/icmp.html
-
-### Managing Subnets in the Default Network ACL
-
-Within a VPC, all Subnets must be associated with a Network ACL. In order to
-"delete" the association between a Subnet and a non-default Network ACL, the
-association is destroyed by replacing it with an association between the Subnet
-and the Default ACL instead.
-
-When managing the Default Network ACL, you cannot "remove" Subnets.
-Instead, they must be reassigned to another Network ACL, or the Subnet itself must be
-destroyed. Because of these requirements, removing the `subnet_ids` attribute from the
-configuration of a `aws_default_network_acl` resource may result in a reoccurring
-plan, until the Subnets are reassigned to another Network ACL or are destroyed.
-
-Because Subnets are by default associated with the Default Network ACL, any
-non-explicit association will show up as a plan to remove the Subnet. For
-example: if you have a custom `aws_network_acl` with two subnets attached, and
-you remove the `aws_network_acl` resource, after successfully destroying this
-resource future plans will show a diff on the managed `aws_default_network_acl`,
-as those two Subnets have been orphaned by the now destroyed network acl and thus
-adopted by the Default Network ACL. In order to avoid a reoccurring plan, they
-will need to be reassigned, destroyed, or added to the `subnet_ids` attribute of
-the `aws_default_network_acl` entry.
-
-### Removing `aws_default_network_acl` from your configuration
-
-Each AWS VPC comes with a Default Network ACL that cannot be deleted. The `aws_default_network_acl`
-allows you to manage this Network ACL, but Terraform cannot destroy it. Removing
-this resource from your configuration will remove it from your statefile and
-management, **but will not destroy the Network ACL.** All Subnets associations
-and ingress or egress rules will be left as they are at the time of removal. You
-can resume managing them via the AWS Console.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Default Network ACL
-* `vpc_id` - The ID of the associated VPC
-* `ingress` - Set of ingress rules
-* `egress` - Set of egress rules
-* `subnet_ids` – IDs of associated Subnets
-
-[aws-network-acls]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_ACLs.html
diff --git a/website/source/docs/providers/aws/r/default_route_table.html.markdown b/website/source/docs/providers/aws/r/default_route_table.html.markdown
deleted file mode 100644
index fc7ea34ca..000000000
--- a/website/source/docs/providers/aws/r/default_route_table.html.markdown
+++ /dev/null
@@ -1,91 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_route_table"
-sidebar_current: "docs-aws-resource-default-route-table"
-description: |-
- Provides a resource to manage a Default VPC Routing Table.
----
-
-# aws\_default\_route\_table
-
-Provides a resource to manage a Default VPC Routing Table.
-
-Each VPC created in AWS comes with a Default Route Table that can be managed, but not
-destroyed. **This is an advanced resource**, and has special caveats to be aware
-of when using it. Please read this document in its entirety before using this
-resource. It is recommended you **do not** use both `aws_default_route_table` to
-manage the default route table **and** use the `aws_main_route_table_association`,
-due to possible conflict in routes.
-
-The `aws_default_route_table` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead attempts to "adopt" it
-into management. We can do this because each VPC created has a Default Route
-Table that cannot be destroyed, and is created with a single route.
-
-When Terraform first adopts the Default Route Table, it **immediately removes all
-defined routes**. It then proceeds to create any routes specified in the
-configuration. This step is required so that only the routes specified in the
-configuration present in the Default Route Table.
-
-For more information about Route Tables, see the AWS Documentation on
-[Route Tables][aws-route-tables].
-
-For more information about managing normal Route Tables in Terraform, see our
-documentation on [aws_route_table][tf-route-tables].
-
-~> **NOTE on Route Tables and Routes:** Terraform currently
-provides both a standalone [Route resource](route.html) and a Route Table resource with routes
-defined in-line. At this time you cannot use a Route Table with in-line routes
-in conjunction with any Route resources. Doing so will cause
-a conflict of rule settings and will overwrite routes.
-
-
-## Example usage with tags:
-
-```hcl
-resource "aws_default_route_table" "r" {
- default_route_table_id = "${aws_vpc.foo.default_route_table_id}"
-
- route {
- # ...
- }
-
- tags {
- Name = "default table"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `default_route_table_id` - (Required) The ID of the Default Routing Table.
-* `route` - (Optional) A list of route objects. Their keys are documented below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `propagating_vgws` - (Optional) A list of virtual gateways for propagation.
-
-Each route supports the following:
-
-* `cidr_block` - (Required) The CIDR block of the route.
-* `ipv6_cidr_block` - Optional) The Ipv6 CIDR block of the route
-* `egress_only_gateway_id` - (Optional) The Egress Only Internet Gateway ID.
-* `gateway_id` - (Optional) The Internet Gateway ID.
-* `nat_gateway_id` - (Optional) The NAT Gateway ID.
-* `instance_id` - (Optional) The EC2 instance ID.
-* `vpc_peering_connection_id` - (Optional) The VPC Peering ID.
-* `network_interface_id` - (Optional) The ID of the elastic network interface (eni) to use.
-
-Each route must contain either a `gateway_id`, an `instance_id`, a `nat_gateway_id`, a
-`vpc_peering_connection_id` or a `network_interface_id`. Note that the default route, mapping
-the VPC's CIDR block to "local", is created implicitly and cannot be specified.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the routing table
-
-
-[aws-route-tables]: http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Route_Tables.html#Route_Replacing_Main_Table
-[tf-route-tables]: /docs/providers/aws/r/route_table.html
diff --git a/website/source/docs/providers/aws/r/default_security_group.html.markdown b/website/source/docs/providers/aws/r/default_security_group.html.markdown
deleted file mode 100644
index d5300f92b..000000000
--- a/website/source/docs/providers/aws/r/default_security_group.html.markdown
+++ /dev/null
@@ -1,132 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_security_group"
-sidebar_current: "docs-aws-resource-default-security-group"
-description: |-
- Manage the default Security Group resource.
----
-
-# aws\_default\_security\_group
-
-Provides a resource to manage the default AWS Security Group.
-
-For EC2 Classic accounts, each region comes with a Default Security Group.
-Additionally, each VPC created in AWS comes with a Default Security Group that can be managed, but not
-destroyed. **This is an advanced resource**, and has special caveats to be aware
-of when using it. Please read this document in its entirety before using this
-resource.
-
-The `aws_default_security_group` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead "adopts" it
-into management. We can do this because these default security groups cannot be
-destroyed, and are created with a known set of default ingress/egress rules.
-
-When Terraform first adopts the Default Security Group, it **immediately removes all
-ingress and egress rules in the Security Group**. It then proceeds to create any rules specified in the
-configuration. This step is required so that only the rules specified in the
-configuration are created.
-
-This resource treats it's inline rules as absolute; only the rules defined
-inline are created, and any additions/removals external to this resource will
-result in diff shown. For these reasons, this resource is incompatible with the
-`aws_security_group_rule` resource.
-
-For more information about Default Security Groups, see the AWS Documentation on
-[Default Security Groups][aws-default-security-groups].
-
-## Basic Example Usage, with default rules
-
-The following config gives the Default Security Group the same rules that AWS
-provides by default, but pulls the resource under management by Terraform. This means that
-any ingress or egress rules added or changed will be detected as drift.
-
-```hcl
-resource "aws_vpc" "mainvpc" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_default_security_group" "default" {
- vpc_id = "${aws_vpc.mainvpc.id}"
-
- ingress {
- protocol = -1
- self = true
- from_port = 0
- to_port = 0
- }
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-}
-```
-
-## Example config to deny all Egress traffic, allowing Ingress
-
-The following denies all Egress traffic by omitting any `egress` rules, while
-including the default `ingress` rule to allow all traffic.
-
-```hcl
-resource "aws_vpc" "mainvpc" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_default_security_group" "default" {
- vpc_id = "${aws_vpc.mainvpc.vpc}"
-
- ingress {
- protocol = -1
- self = true
- from_port = 0
- to_port = 0
- }
-}
-```
-
-## Argument Reference
-
-The arguments of an `aws_default_security_group` differ slightly from `aws_security_group`
-resources. Namely, the `name` argument is computed, and the `name_prefix` attribute
-removed. The following arguments are still supported:
-
-* `ingress` - (Optional) Can be specified multiple times for each
- ingress rule. Each ingress block supports fields documented below.
-* `egress` - (Optional, VPC only) Can be specified multiple times for each
- egress rule. Each egress block supports fields documented below.
-* `vpc_id` - (Optional, Forces new resource) The VPC ID. **Note that changing
-the `vpc_id` will _not_ restore any default security group rules that were
-modified, added, or removed.** It will be left in it's current state
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-
-## Usage
-
-With the exceptions mentioned above, `aws_default_security_group` should
-identical behavior to `aws_security_group`. Please consult [AWS_SECURITY_GROUP](/docs/providers/aws/r/security_group.html)
-for further usage documentation.
-
-### Removing `aws_default_security_group` from your configuration
-
-Each AWS VPC (or region, if using EC2 Classic) comes with a Default Security
-Group that cannot be deleted. The `aws_default_security_group` allows you to
-manage this Security Group, but Terraform cannot destroy it. Removing this resource
-from your configuration will remove it from your statefile and management, but
-will not destroy the Security Group. All ingress or egress rules will be left as
-they are at the time of removal. You can resume managing them via the AWS Console.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group
-* `vpc_id` - The VPC ID.
-* `owner_id` - The owner ID.
-* `name` - The name of the security group
-* `description` - The description of the security group
-* `ingress` - The ingress rules. See above for more.
-* `egress` - The egress rules. See above for more.
-
-[aws-default-security-groups]: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-network-security.html#default-security-group
diff --git a/website/source/docs/providers/aws/r/default_subnet.html.markdown b/website/source/docs/providers/aws/r/default_subnet.html.markdown
deleted file mode 100644
index 78b117663..000000000
--- a/website/source/docs/providers/aws/r/default_subnet.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_subnet"
-sidebar_current: "docs-aws-resource-default-subnet"
-description: |-
- Manage a default VPC subnet resource.
----
-
-# aws\_default\_subnet
-
-Provides a resource to manage a [default AWS VPC subnet](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html#default-vpc-basics)
-in the current region.
-
-The `aws_default_subnet` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead "adopts" it
-into management.
-
-## Example Usage
-
-Basic usage with tags:
-
-```
-resource "aws_default_subnet" "default_az1" {
- availability_zone = "us-west-2a"
-
- tags {
- Name = "Default subnet for us-west-2a"
- }
-}
-```
-
-## Argument Reference
-
-The arguments of an `aws_default_subnet` differ from `aws_subnet` resources.
-Namely, the `availability_zone` argument is required and the `vpc_id`, `cidr_block`, `ipv6_cidr_block`,
-`map_public_ip_on_launch` and `assign_ipv6_address_on_creation` arguments are computed.
-The following arguments are still supported:
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-### Removing `aws_default_subnet` from your configuration
-
-The `aws_default_subnet` resource allows you to manage a region's default VPC subnet,
-but Terraform cannot destroy it. Removing this resource from your configuration
-will remove it from your statefile and management, but will not destroy the subnet.
-You can resume managing the subnet via the AWS Console.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the subnet
-* `availability_zone`- The AZ for the subnet.
-* `cidr_block` - The CIDR block for the subnet.
-* `vpc_id` - The VPC ID.
-* `ipv6_association_id` - The association ID for the IPv6 CIDR block.
-* `ipv6_cidr_block` - The IPv6 CIDR block.
diff --git a/website/source/docs/providers/aws/r/default_vpc.html.markdown b/website/source/docs/providers/aws/r/default_vpc.html.markdown
deleted file mode 100644
index acf9be792..000000000
--- a/website/source/docs/providers/aws/r/default_vpc.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_vpc"
-sidebar_current: "docs-aws-resource-default-vpc"
-description: |-
- Manage the default VPC resource.
----
-
-# aws\_default\_vpc
-
-Provides a resource to manage the [default AWS VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/default-vpc.html)
-in the current region.
-
-For AWS accounts created after 2013-12-04, each region comes with a Default VPC.
-**This is an advanced resource**, and has special caveats to be aware of when
-using it. Please read this document in its entirety before using this resource.
-
-The `aws_default_vpc` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead "adopts" it
-into management.
-
-## Example Usage
-
-Basic usage with tags:
-
-```
-resource "aws_default_vpc" "default" {
- tags {
- Name = "Default VPC"
- }
-}
-```
-
-## Argument Reference
-
-The arguments of an `aws_default_vpc` differ slightly from `aws_vpc`
-resources. Namely, the `cidr_block`, `instance_tenancy` and `assign_generated_ipv6_cidr_block`
-arguments are computed. The following arguments are still supported:
-
-* `enable_dns_support` - (Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults true.
-* `enable_dns_hostnames` - (Optional) A boolean flag to enable/disable DNS hostnames in the VPC. Defaults false.
-* `enable_classiclink` - (Optional) A boolean flag to enable/disable ClassicLink
- for the VPC. Only valid in regions and accounts that support EC2 Classic.
- See the [ClassicLink documentation][1] for more information. Defaults false.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-### Removing `aws_default_vpc` from your configuration
-
-The `aws_default_vpc` resource allows you to manage a region's default VPC,
-but Terraform cannot destroy it. Removing this resource from your configuration
-will remove it from your statefile and management, but will not destroy the VPC.
-You can resume managing the VPC via the AWS Console.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC
-* `cidr_block` - The CIDR block of the VPC
-* `instance_tenancy` - Tenancy of instances spin up within VPC.
-* `enable_dns_support` - Whether or not the VPC has DNS support
-* `enable_dns_hostnames` - Whether or not the VPC has DNS hostname support
-* `enable_classiclink` - Whether or not the VPC has Classiclink enabled
-* `assign_generated_ipv6_cidr_block` - Whether or not an Amazon-provided IPv6 CIDR
-block with a /56 prefix length for the VPC was assigned
-* `main_route_table_id` - The ID of the main route table associated with
- this VPC. Note that you can change a VPC's main route table by using an
- [`aws_main_route_table_association`](/docs/providers/aws/r/main_route_table_assoc.html)
-* `default_network_acl_id` - The ID of the network ACL created by default on VPC creation
-* `default_security_group_id` - The ID of the security group created by default on VPC creation
-* `default_route_table_id` - The ID of the route table created by default on VPC creation
-* `ipv6_association_id` - The association ID for the IPv6 CIDR block of the VPC
-* `ipv6_cidr_block` - The IPv6 CIDR block of the VPC
-
-
-[1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html
diff --git a/website/source/docs/providers/aws/r/default_vpc_dhcp_options.html.markdown b/website/source/docs/providers/aws/r/default_vpc_dhcp_options.html.markdown
deleted file mode 100644
index bc5a28383..000000000
--- a/website/source/docs/providers/aws/r/default_vpc_dhcp_options.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_default_vpc_dhcp_options"
-sidebar_current: "docs-aws-resource-default-vpc-dhcp-options"
-description: |-
- Manage the default VPC DHCP Options resource.
----
-
-# aws\_default\_vpc\_dhcp\_options
-
-Provides a resource to manage the [default AWS DHCP Options Set](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html#AmazonDNS)
-in the current region.
-
-Each AWS region comes with a default set of DHCP options.
-**This is an advanced resource**, and has special caveats to be aware of when
-using it. Please read this document in its entirety before using this resource.
-
-The `aws_default_vpc_dhcp_options` behaves differently from normal resources, in that
-Terraform does not _create_ this resource, but instead "adopts" it
-into management.
-
-## Example Usage
-
-Basic usage with tags:
-
-```
-resource "aws_default_vpc_dhcp_options" "default" {
- tags {
- Name = "Default DHCP Option Set"
- }
-}
-```
-
-## Argument Reference
-
-The arguments of an `aws_default_vpc_dhcp_options` differ slightly from `aws_vpc_dhcp_options` resources.
-Namely, the `domain_name`, `domain_name_servers` and `ntp_servers` arguments are computed.
-The following arguments are still supported:
-
-* `netbios_name_servers` - (Optional) List of NETBIOS name servers.
-* `netbios_node_type` - (Optional) The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-### Removing `aws_default_vpc_dhcp_options` from your configuration
-
-The `aws_default_vpc_dhcp_options` resource allows you to manage a region's default DHCP Options Set,
-but Terraform cannot destroy it. Removing this resource from your configuration
-will remove it from your statefile and management, but will not destroy the DHCP Options Set.
-You can resume managing the DHCP Options Set via the AWS Console.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the DHCP Options Set.
diff --git a/website/source/docs/providers/aws/r/devicefarm_project.html.markdown b/website/source/docs/providers/aws/r/devicefarm_project.html.markdown
deleted file mode 100644
index 2f0424933..000000000
--- a/website/source/docs/providers/aws/r/devicefarm_project.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_devicefarm_project"
-sidebar_current: "docs-aws-resource-devicefarm-project"
-description: |-
- Provides a Devicefarm project
----
-
-# aws_devicefarm_project
-
-Provides a resource to manage AWS Device Farm Projects.
-Please keep in mind that this feature is only supported in the "us-west-2" region.
-This resource will error if you try to create a project in another region.
-
-For more information about Device Farm Projects, see the AWS Documentation on
-[Device Farm Projects][aws-get-project].
-
-## Basic Example Usage
-
-
-```hcl
-resource "aws_devicefarm_project" "awesome_devices" {
- name = "my-device-farm"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the project
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name of this project
-
-[aws-get-project]: http://docs.aws.amazon.com/devicefarm/latest/APIReference/API_GetProject.html
diff --git a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown b/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
deleted file mode 100644
index 16bc9145c..000000000
--- a/website/source/docs/providers/aws/r/directory_service_directory.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_directory_service_directory"
-sidebar_current: "docs-aws-resource-directory-service-directory"
-description: |-
- Provides a directory in AWS Directory Service.
----
-
-# aws\_directory\_service\_directory
-
-Provides a Simple or Managed Microsoft directory in AWS Directory Service.
-
-~> **Note:** All arguments including the password and customer username will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_directory_service_directory" "bar" {
- name = "corp.notexample.com"
- password = "SuperSecretPassw0rd"
- size = "Small"
-
- vpc_settings {
- vpc_id = "${aws_vpc.main.id}"
- subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
- }
-}
-
-resource "aws_vpc" "main" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_subnet" "foo" {
- vpc_id = "${aws_vpc.main.id}"
- availability_zone = "us-west-2a"
- cidr_block = "10.0.1.0/24"
-}
-
-resource "aws_subnet" "bar" {
- vpc_id = "${aws_vpc.main.id}"
- availability_zone = "us-west-2b"
- cidr_block = "10.0.2.0/24"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The fully qualified name for the directory, such as `corp.example.com`
-* `password` - (Required) The password for the directory administrator or connector user.
-* `size` - (Required for `SimpleAD` and `ADConnector`) The size of the directory (`Small` or `Large` are accepted values).
-* `vpc_settings` - (Required for `SimpleAD` and `MicrosoftAD`) VPC related information about the directory. Fields documented below.
-* `connect_settings` - (Required for `ADConnector`) Connector related information about the directory. Fields documented below.
-* `alias` - (Optional) The alias for the directory (must be unique amongst all aliases in AWS). Required for `enable_sso`.
-* `description` - (Optional) A textual description for the directory.
-* `short_name` - (Optional) The short name of the directory, such as `CORP`.
-* `enable_sso` - (Optional) Whether to enable single-sign on for the directory. Requires `alias`. Defaults to `false`.
-* `type` (Optional) - The directory type (`SimpleAD` or `MicrosoftAD` are accepted values). Defaults to `SimpleAD`.
-
-**vpc\_settings** supports the following:
-
-* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
-* `vpc_id` - (Required) The identifier of the VPC that the directory is in.
-
-**connect\_settings** supports the following:
-
-* `customer_username` - (Required) The username corresponding to the password provided.
-* `customer_dns_ips` - (Required) The DNS IP addresses of the domain to connect to.
-* `subnet_ids` - (Required) The identifiers of the subnets for the directory servers (2 subnets in 2 different AZs).
-* `vpc_id` - (Required) The identifier of the VPC that the directory is in.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The directory identifier.
-* `access_url` - The access URL for the directory, such as `http://alias.awsapps.com`.
-* `dns_ip_addresses` - A list of IP addresses of the DNS servers for the directory or connector.
diff --git a/website/source/docs/providers/aws/r/dms_certificate.html.markdown b/website/source/docs/providers/aws/r/dms_certificate.html.markdown
deleted file mode 100644
index 6e7a06d8f..000000000
--- a/website/source/docs/providers/aws/r/dms_certificate.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_dms_certificate"
-sidebar_current: "docs-aws-resource-dms-certificate"
-description: |-
- Provides a DMS (Data Migration Service) certificate resource.
----
-
-# aws\_dms\_certificate
-
-Provides a DMS (Data Migration Service) certificate resource. DMS certificates can be created, deleted, and imported.
-
-~> **Note:** All arguments including the PEM encoded certificate will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-# Create a new certificate
-resource "aws_dms_certificate" "test" {
- certificate_id = "test-dms-certificate-tf"
- certificate_pem = "..."
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `certificate_id` - (Required) The certificate identifier.
-
- - Must contain from 1 to 255 alphanumeric characters and hyphens.
-
-* `certificate_pem` - (Optional) The contents of the .pem X.509 certificate file for the certificate. Either `certificate_pem` or `certificate_wallet` must be set.
-* `certificate_wallet` - (Optional) The contents of the Oracle Wallet certificate for use with SSL. Either `certificate_pem` or `certificate_wallet` must be set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `certificate_arn` - The Amazon Resource Name (ARN) for the certificate.
-
-## Import
-
-Certificates can be imported using the `certificate_arn`, e.g.
-
-```
-$ terraform import aws_dms_certificate.test arn:aws:dms:us-west-2:123456789:cert:xxxxxxxxxx
-```
diff --git a/website/source/docs/providers/aws/r/dms_endpoint.html.markdown b/website/source/docs/providers/aws/r/dms_endpoint.html.markdown
deleted file mode 100644
index e88b55a29..000000000
--- a/website/source/docs/providers/aws/r/dms_endpoint.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_dms_endpoint"
-sidebar_current: "docs-aws-resource-dms-endpoint"
-description: |-
- Provides a DMS (Data Migration Service) endpoint resource.
----
-
-# aws\_dms\_endpoint
-
-Provides a DMS (Data Migration Service) endpoint resource. DMS endpoints can be created, updated, deleted, and imported.
-
-~> **Note:** All arguments including the password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-# Create a new endpoint
-resource "aws_dms_endpoint" "test" {
- certificate_arn = "arn:aws:acm:us-east-1:123456789012:certificate/12345678-1234-1234-1234-123456789012"
- database_name = "test"
- endpoint_id = "test-dms-endpoint-tf"
- endpoint_type = "source"
- engine_name = "aurora"
- extra_connection_attributes = ""
- kms_key_arn = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- password = "test"
- port = 3306
- server_name = "test"
- ssl_mode = "none"
-
- tags {
- Name = "test"
- }
-
- username = "test"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `certificate_arn` - (Optional, Default: empty string) The Amazon Resource Name (ARN) for the certificate.
-* `database_name` - (Optional) The name of the endpoint database.
-* `endpoint_id` - (Required) The database endpoint identifier.
-
- - Must contain from 1 to 255 alphanumeric characters or hyphens.
- - Must begin with a letter
- - Must contain only ASCII letters, digits, and hyphens
- - Must not end with a hyphen
- - Must not contain two consecutive hyphens
-
-* `endpoint_type` - (Required) The type of endpoint. Can be one of `source | target`.
-* `engine_name` - (Required) The type of engine for the endpoint. Can be one of `mysql | oracle | postgres | mariadb | aurora | redshift | sybase | sqlserver`.
-* `extra_connection_attributes` - (Optional) Additional attributes associated with the connection. For available attributes see [Using Extra Connection Attributes with AWS Database Migration Service](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Introduction.ConnectionAttributes.html).
-* `kms_key_arn` - (Optional) The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
-* `password` - (Required) The password to be used to login to the endpoint database.
-* `port` - (Required) The port used by the endpoint database.
-* `server_name` - (Required) The host name of the server.
-* `ssl_mode` - (Optional, Default: none) The SSL mode to use for the connection. Can be one of `none | require | verify-ca | verify-full`
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `username` - (Required) The user name to be used to login to the endpoint database.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `endpoint_arn` - The Amazon Resource Name (ARN) for the endpoint.
-
-## Import
-
-Endpoints can be imported using the `endpoint_id`, e.g.
-
-```
-$ terraform import aws_dms_endpoint.test test-dms-endpoint-tf
-```
diff --git a/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown b/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown
deleted file mode 100644
index 83e8e2cb5..000000000
--- a/website/source/docs/providers/aws/r/dms_replication_instance.html.markdown
+++ /dev/null
@@ -1,96 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_dms_replication_instance"
-sidebar_current: "docs-aws-resource-dms-replication-instance"
-description: |-
- Provides a DMS (Data Migration Service) replication instance resource.
----
-
-# aws\_dms\_replication\_instance
-
-Provides a DMS (Data Migration Service) replication instance resource. DMS replication instances can be created, updated, deleted, and imported.
-
-## Example Usage
-
-```hcl
-# Create a new replication instance
-resource "aws_dms_replication_instance" "test" {
- allocated_storage = 20
- apply_immediately = true
- auto_minor_version_upgrade = true
- availability_zone = "us-west-2c"
- engine_version = "1.9.0"
- kms_key_arn = "arn:aws:kms:us-east-1:123456789012:key/12345678-1234-1234-1234-123456789012"
- multi_az = false
- preferred_maintenance_window = "sun:10:30-sun:14:30"
- publicly_accessible = true
- replication_instance_class = "dms.t2.micro"
- replication_instance_id = "test-dms-replication-instance-tf"
- replication_subnet_group_id = "${aws_dms_replication_subnet_group.test-dms-replication-subnet-group-tf}"
-
- tags {
- Name = "test"
- }
-
- vpc_security_group_ids = [
- "sg-12345678",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allocated_storage` - (Optional, Default: 50, Min: 5, Max: 6144) The amount of storage (in gigabytes) to be initially allocated for the replication instance.
-* `apply_immediately` - (Optional, Default: false) Indicates whether the changes should be applied immediately or during the next maintenance window. Only used when updating an existing resource.
-* `auto_minor_version_upgrade` - (Optional, Default: false) Indicates that minor engine upgrades will be applied automatically to the replication instance during the maintenance window.
-* `availability_zone` - (Optional) The EC2 Availability Zone that the replication instance will be created in.
-* `engine_version` - (Optional) The engine version number of the replication instance.
-* `kms_key_arn` - (Optional) The Amazon Resource Name (ARN) for the KMS key that will be used to encrypt the connection parameters. If you do not specify a value for `kms_key_arn`, then AWS DMS will use your default encryption key. AWS KMS creates the default encryption key for your AWS account. Your AWS account has a different default encryption key for each AWS region.
-* `multi_az` - (Optional) Specifies if the replication instance is a multi-az deployment. You cannot set the `availability_zone` parameter if the `multi_az` parameter is set to `true`.
-* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in Universal Coordinated Time (UTC).
-
- - Default: A 30-minute window selected at random from an 8-hour block of time per region, occurring on a random day of the week.
- - Format: `ddd:hh24:mi-ddd:hh24:mi`
- - Valid Days: `mon, tue, wed, thu, fri, sat, sun`
- - Constraints: Minimum 30-minute window.
-
-* `publicly_accessible` - (Optional, Default: false) Specifies the accessibility options for the replication instance. A value of true represents an instance with a public IP address. A value of false represents an instance with a private IP address.
-* `replication_instance_class` - (Required) The compute and memory capacity of the replication instance as specified by the replication instance class. Can be one of `dms.t2.micro | dms.t2.small | dms.t2.medium | dms.t2.large | dms.c4.large | dms.c4.xlarge | dms.c4.2xlarge | dms.c4.4xlarge`
-* `replication_instance_id` - (Required) The replication instance identifier. This parameter is stored as a lowercase string.
-
- - Must contain from 1 to 63 alphanumeric characters or hyphens.
- - First character must be a letter.
- - Cannot end with a hyphen
- - Cannot contain two consecutive hyphens.
-
-* `replication_subnet_group_id` - (Optional) A subnet group to associate with the replication instance.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `vpc_security_group_ids` - (Optional) A list of VPC security group IDs to be used with the replication instance. The VPC security groups must work with the VPC containing the replication instance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `replication_instance_arn` - The Amazon Resource Name (ARN) of the replication instance.
-* `replication_instance_private_ips` - A list of the private IP addresses of the replication instance.
-* `replication_instance_public_ips` - A list of the public IP addresses of the replication instance.
-
-
-## Timeouts
-
-`aws_dms_replication_instance` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `create` - (Default `30 minutes`) Used for Creating Instances
-- `update` - (Default `30 minutes`) Used for Database modifications
-- `delete` - (Default `30 minutes`) Used for destroying databases.
-
-## Import
-
-Replication instances can be imported using the `replication_instance_id`, e.g.
-
-```
-$ terraform import aws_dms_replication_instance.test test-dms-replication-instance-tf
-```
diff --git a/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown b/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown
deleted file mode 100644
index d3aa97fde..000000000
--- a/website/source/docs/providers/aws/r/dms_replication_subnet_group.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_dms_replication_subnet_group"
-sidebar_current: "docs-aws-resource-dms-replication-subnet-group"
-description: |-
- Provides a DMS (Data Migration Service) subnet group resource.
----
-
-# aws\_dms\_replication\_subnet\_group
-
-Provides a DMS (Data Migration Service) replication subnet group resource. DMS replication subnet groups can be created, updated, deleted, and imported.
-
-## Example Usage
-
-```hcl
-# Create a new replication subnet group
-resource "aws_dms_replication_subnet_group" "test" {
- replication_subnet_group_description = "Test replication subnet group"
- replication_subnet_group_id = "test-dms-replication-subnet-group-tf"
-
- subnet_ids = [
- "subnet-12345678",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `replication_subnet_group_description` - (Required) The description for the subnet group.
-* `replication_subnet_group_id` - (Required) The name for the replication subnet group. This value is stored as a lowercase string.
-
- - Must contain no more than 255 alphanumeric characters, periods, spaces, underscores, or hyphens.
- - Must not be "default".
-
-* `subnet_ids` - (Required) A list of the EC2 subnet IDs for the subnet group.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `vpc_id` - The ID of the VPC the subnet group is in.
-
-## Import
-
-Replication subnet groups can be imported using the `replication_subnet_group_id`, e.g.
-
-```
-$ terraform import aws_dms_replication_subnet_group.test test-dms-replication-subnet-group-tf
-```
diff --git a/website/source/docs/providers/aws/r/dms_replication_task.html.markdown b/website/source/docs/providers/aws/r/dms_replication_task.html.markdown
deleted file mode 100644
index eb24c0430..000000000
--- a/website/source/docs/providers/aws/r/dms_replication_task.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_dms_replication_task"
-sidebar_current: "docs-aws-resource-dms-replication-task"
-description: |-
- Provides a DMS (Data Migration Service) replication task resource.
----
-
-# aws\_dms\_replication\_task
-
-Provides a DMS (Data Migration Service) replication task resource. DMS replication tasks can be created, updated, deleted, and imported.
-
-## Example Usage
-
-```hcl
-# Create a new replication task
-resource "aws_dms_replication_task" "test" {
- cdc_start_time = 1484346880
- migration_type = "full-load"
- replication_instance_arn = "${aws_dms_replication_instance.test-dms-replication-instance-tf.replication_instance_arn}"
- replication_task_id = "test-dms-replication-task-tf"
- replication_task_settings = "..."
- source_endpoint_arn = "${aws_dms_endpoint.test-dms-source-endpoint-tf.endpoint_arn}"
- table_mappings = "{\"rules\":[{\"rule-type\":\"selection\",\"rule-id\":\"1\",\"rule-name\":\"1\",\"object-locator\":{\"schema-name\":\"%\",\"table-name\":\"%\"},\"rule-action\":\"include\"}]}"
-
- tags {
- Name = "test"
- }
-
- target_endpoint_arn = "${aws_dms_endpoint.test-dms-target-endpoint-tf.endpoint_arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cdc_start_time` - (Optional) The Unix timestamp integer for the start of the Change Data Capture (CDC) operation.
-* `migration_type` - (Required) The migration type. Can be one of `full-load | cdc | full-load-and-cdc`.
-* `replication_instance_arn` - (Required) The Amazon Resource Name (ARN) of the replication instance.
-* `replication_task_id` - (Required) The replication task identifier.
-
- - Must contain from 1 to 255 alphanumeric characters or hyphens.
- - First character must be a letter.
- - Cannot end with a hyphen.
- - Cannot contain two consecutive hyphens.
-
-* `replication_task_settings` - (Optional) An escaped JSON string that contains the task settings. For a complete list of task settings, see [Task Settings for AWS Database Migration Service Tasks](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TaskSettings.html).
-* `source_endpoint_arn` - (Required) The Amazon Resource Name (ARN) string that uniquely identifies the source endpoint.
-* `table_mappings` - (Required) An escaped JSON string that contains the table mappings. For information on table mapping see [Using Table Mapping with an AWS Database Migration Service Task to Select and Filter Data](http://docs.aws.amazon.com/dms/latest/userguide/CHAP_Tasks.CustomizingTasks.TableMapping.html)
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `target_endpoint_arn` - (Required) The Amazon Resource Name (ARN) string that uniquely identifies the target endpoint.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `replication_task_arn` - The Amazon Resource Name (ARN) for the replication task.
-
-## Import
-
-Replication tasks can be imported using the `replication_task_id`, e.g.
-
-```
-$ terraform import aws_dms_replication_task.test test-dms-replication-task-tf
-```
diff --git a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown b/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
deleted file mode 100644
index 413a17abb..000000000
--- a/website/source/docs/providers/aws/r/dynamodb_table.html.markdown
+++ /dev/null
@@ -1,144 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: dynamodb_table"
-sidebar_current: "docs-aws-resource-dynamodb-table"
-description: |-
- Provides a DynamoDB table resource
----
-
-# aws\_dynamodb\_table
-
-Provides a DynamoDB table resource
-
-## Example Usage
-
-The following DynamoDB table description models the table and GSI shown
-in the [AWS SDK example documentation](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GSI.html)
-
-```hcl
-resource "aws_dynamodb_table" "basic-dynamodb-table" {
- name = "GameScores"
- read_capacity = 20
- write_capacity = 20
- hash_key = "UserId"
- range_key = "GameTitle"
-
- attribute {
- name = "UserId"
- type = "S"
- }
-
- attribute {
- name = "GameTitle"
- type = "S"
- }
-
- attribute {
- name = "TopScore"
- type = "N"
- }
-
- ttl {
- attribute_name = "TimeToExist"
- enabled = false
- }
-
- global_secondary_index {
- name = "GameTitleIndex"
- hash_key = "GameTitle"
- range_key = "TopScore"
- write_capacity = 10
- read_capacity = 10
- projection_type = "INCLUDE"
- non_key_attributes = ["UserId"]
- }
-
- tags {
- Name = "dynamodb-table-1"
- Environment = "production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the table, this needs to be unique
- within a region.
-* `read_capacity` - (Required) The number of read units for this table
-* `write_capacity` - (Required) The number of write units for this table
-* `hash_key` - (Required, Forces new resource) The attribute to use as the hash key (the
- attribute must also be defined as an attribute record)
-* `range_key` - (Optional, Forces new resource) The attribute to use as the range key (must
- also be defined)
-* `attribute` - Define an attribute, has two properties:
- * `name` - The name of the attribute
- * `type` - One of: S, N, or B for (S)tring, (N)umber or (B)inary data
-* `stream_enabled` - (Optional) Indicates whether Streams are to be enabled (true) or disabled (false).
-* `stream_view_type` - (Optional) When an item in the table is modified, StreamViewType determines what information is written to the table's stream. Valid values are KEYS_ONLY, NEW_IMAGE, OLD_IMAGE, NEW_AND_OLD_IMAGES.
-* `ttl` - (Optional) Defines ttl, has two properties, and can only be specified once:
- * `enabled` - (Required) Indicates whether ttl is enabled (true) or disabled (false).
- * `attribute_name` - (Required) The name of the table attribute to store the TTL timestamp in.
-* `local_secondary_index` - (Optional, Forces new resource) Describe an LSI on the table;
- these can only be allocated *at creation* so you cannot change this
-definition after you have created the resource.
-* `global_secondary_index` - (Optional) Describe a GSI for the table;
- subject to the normal limits on the number of GSIs, projected
-attributes, etc.
-* `tags` - (Optional) A map of tags to populate on the created table.
-
-For both `local_secondary_index` and `global_secondary_index` objects,
-the following properties are supported:
-
-* `name` - (Required) The name of the LSI or GSI
-* `hash_key` - (Required for GSI) The name of the hash key in the index; must be
-defined as an attribute in the resource. Only applies to
- `global_secondary_index`
-* `range_key` - (Required) The name of the range key; must be defined
-* `projection_type` - (Required) One of "ALL", "INCLUDE" or "KEYS_ONLY"
- where *ALL* projects every attribute into the index, *KEYS_ONLY*
- projects just the hash and range key into the index, and *INCLUDE*
- projects only the keys specified in the _non_key_attributes_
-parameter.
-* `non_key_attributes` - (Optional) Only required with *INCLUDE* as a
- projection type; a list of attributes to project into the index. These
-do not need to be defined as attributes on the table.
-
-For `global_secondary_index` objects only, you need to specify
-`write_capacity` and `read_capacity` in the same way you would for the
-table as they have separate I/O capacity.
-
-### A note about attributes
-
-Only define attributes on the table object that are going to be used as:
-
-* Table hash key or range key
-* LSI or GSI hash key or range key
-
-The DynamoDB API expects attribute structure (name and type) to be
-passed along when creating or updating GSI/LSIs or creating the initial
-table. In these cases it expects the Hash / Range keys to be provided;
-because these get re-used in numerous places (i.e the table's range key
-could be a part of one or more GSIs), they are stored on the table
-object to prevent duplication and increase consistency. If you add
-attributes here that are not used in these scenarios it can cause an
-infinite loop in planning.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The arn of the table
-* `id` - The name of the table
-* `stream_arn` - The ARN of the Table Stream. Only available when `stream_enabled = true`
-
-
-## Import
-
-DynamoDB tables can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_dynamodb_table.basic-dynamodb-table GameScores
-```
diff --git a/website/source/docs/providers/aws/r/ebs_snapshot.html.md b/website/source/docs/providers/aws/r/ebs_snapshot.html.md
deleted file mode 100644
index fffa6c978..000000000
--- a/website/source/docs/providers/aws/r/ebs_snapshot.html.md
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ebs_snapshot"
-sidebar_current: "docs-aws-resource-ebs-snapshot"
-description: |-
- Provides an elastic block storage snapshot resource.
----
-
-# aws\_ebs\_snapshot
-
-Creates a Snapshot of an EBS Volume.
-
-## Example Usage
-
-```hcl
-resource "aws_ebs_volume" "example" {
- availability_zone = "us-west-2a"
- size = 40
- tags {
- Name = "HelloWorld"
- }
-}
-
-resource "aws_ebs_snapshot" "example_snapshot" {
- volume_id = "${aws_ebs_volume.example.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `volume_id` - (Required) The Volume ID of which to make a snapshot.
-* `description` - (Optional) A description of what the snapshot is.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The snapshot ID (e.g. snap-59fcb34e).
-* `owner_id` - The AWS account ID of the EBS snapshot owner.
-* `owner_alias` - Value from an Amazon-maintained list (`amazon`, `aws-marketplace`, `microsoft`) of snapshot owners.
-* `encrypted` - Whether the snapshot is encrypted.
-* `volume_size` - The size of the drive in GiBs.
-* `kms_key_id` - The ARN for the KMS encryption key.
-* `data_encryption_key_id` - The data encryption key identifier for the snapshot.
-* `tags` - A mapping of tags for the resource.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/ebs_volume.html.md b/website/source/docs/providers/aws/r/ebs_volume.html.md
deleted file mode 100644
index 205473f98..000000000
--- a/website/source/docs/providers/aws/r/ebs_volume.html.md
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ebs_volume"
-sidebar_current: "docs-aws-resource-ebs-volume"
-description: |-
- Provides an elastic block storage resource.
----
-
-# aws\_ebs\_volume
-
-Manages a single EBS volume.
-
-## Example Usage
-
-```hcl
-resource "aws_ebs_volume" "example" {
- availability_zone = "us-west-2a"
- size = 40
- tags {
- Name = "HelloWorld"
- }
-}
-```
-
-~> **NOTE**: One of `size` or `snapshot_id` is required when specifying an EBS volume
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `availability_zone` - (Required) The AZ where the EBS volume will exist.
-* `encrypted` - (Optional) If true, the disk will be encrypted.
-* `iops` - (Optional) The amount of IOPS to provision for the disk.
-* `size` - (Optional) The size of the drive in GiBs.
-* `snapshot_id` (Optional) A snapshot to base the EBS volume off of.
-* `type` - (Optional) The type of EBS volume. Can be "standard", "gp2", "io1", "sc1" or "st1" (Default: "standard").
-* `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The volume ID (e.g. vol-59fcb34e).
-
-
-## Import
-
-EBS Volumes can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_ebs_volume.data vol-049df61146c4d7901
-```
diff --git a/website/source/docs/providers/aws/r/ecr_repository.html.markdown b/website/source/docs/providers/aws/r/ecr_repository.html.markdown
deleted file mode 100644
index d9783e0a2..000000000
--- a/website/source/docs/providers/aws/r/ecr_repository.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ecr_repository"
-sidebar_current: "docs-aws-resource-ecr-repository"
-description: |-
- Provides an EC2 Container Registry Repository.
----
-
-# aws\_ecr\_repository
-
-Provides an EC2 Container Registry Repository.
-
-~> **NOTE on ECR Availability**: The EC2 Container Registry is not yet rolled out
-in all regions - available regions are listed
-[the AWS Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#ecr_region).
-
-## Example Usage
-
-```hcl
-resource "aws_ecr_repository" "foo" {
- name = "bar"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Name of the repository.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - Full ARN of the repository.
-* `name` - The name of the repository.
-* `registry_id` - The registry ID where the repository was created.
-* `repository_url` - The URL of the repository (in the form `aws_account_id.dkr.ecr.region.amazonaws.com/repositoryName`)
-
-
-## Import
-
-ECR Repositories can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_ecr_repository.service test-service
-```
diff --git a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown b/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
deleted file mode 100644
index a6437cf6c..000000000
--- a/website/source/docs/providers/aws/r/ecr_repository_policy.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ecr_repository_policy"
-sidebar_current: "docs-aws-resource-ecr-repository-policy"
-description: |-
- Provides an ECR Repository Policy.
----
-
-# aws\_ecr\_repository\_policy
-
-Provides an ECR repository policy.
-
-Note that currently only one policy may be applied to a repository.
-
-~> **NOTE on ECR Availability**: The EC2 Container Registry is not yet rolled out
-in all regions - available regions are listed in
-[the AWS Docs](https://docs.aws.amazon.com/general/latest/gr/rande.html#ecr_region).
-
-## Example Usage
-
-```hcl
-resource "aws_ecr_repository" "foo" {
- name = "bar"
-}
-
-resource "aws_ecr_repository_policy" "foopolicy" {
- repository = "${aws_ecr_repository.foo.name}"
-
- policy = < **Note:** To prevent a race condition during service deletion, make sure to set `depends_on` to the related `aws_iam_role_policy`; otherwise, the policy may be destroyed too soon and the ECS service will then get stuck in the `DRAINING` state.
-
-Provides an ECS service - effectively a task that is expected to run until an error occurs or a user terminates it (typically a webserver or a database).
-
-See [ECS Services section in AWS developer guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs_services.html).
-
-## Example Usage
-
-```hcl
-resource "aws_ecs_service" "mongo" {
- name = "mongodb"
- cluster = "${aws_ecs_cluster.foo.id}"
- task_definition = "${aws_ecs_task_definition.mongo.arn}"
- desired_count = 3
- iam_role = "${aws_iam_role.foo.arn}"
- depends_on = ["aws_iam_role_policy.foo"]
-
- placement_strategy {
- type = "binpack"
- field = "cpu"
- }
-
- load_balancer {
- elb_name = "${aws_elb.foo.name}"
- container_name = "mongo"
- container_port = 8080
- }
-
- placement_constraints {
- type = "memberOf"
- expression = "attribute:ecs.availability-zone in [us-west-2a, us-west-2b]"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the service (up to 255 letters, numbers, hyphens, and underscores)
-* `task_definition` - (Required) The family and revision (`family:revision`) or full ARN of the task definition that you want to run in your service.
-* `desired_count` - (Required) The number of instances of the task definition to place and keep running
-* `cluster` - (Optional) ARN of an ECS cluster
-* `iam_role` - (Optional) The ARN of IAM role that allows your Amazon ECS container agent to make calls to your load balancer on your behalf. This parameter is only required if you are using a load balancer with your service.
-* `deployment_maximum_percent` - (Optional) The upper limit (as a percentage of the service's desiredCount) of the number of running tasks that can be running in a service during a deployment.
-* `deployment_minimum_healthy_percent` - (Optional) The lower limit (as a percentage of the service's desiredCount) of the number of running tasks that must remain running and healthy in a service during a deployment.
-* `placement_strategy` - (Optional) Service level strategy rules that are taken
-into consideration during task placement. The maximum number of
-`placement_strategy` blocks is `5`. Defined below.
-* `load_balancer` - (Optional) A load balancer block. Load balancers documented below.
-* `placement_constraints` - (Optional) rules that are taken into consideration during task placement. Maximum number of
-`placement_constraints` is `10`. Defined below.
-
--> **Note:** As a result of an AWS limitation, a single `load_balancer` can be attached to the ECS service at most. See [related docs](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-load-balancing.html#load-balancing-concepts).
-
-Load balancers support the following:
-
-* `elb_name` - (Required for ELB Classic) The name of the ELB (Classic) to associate with the service.
-* `target_group_arn` - (Required for ALB) The ARN of the ALB target group to associate with the service.
-* `container_name` - (Required) The name of the container to associate with the load balancer (as it appears in a container definition).
-* `container_port` - (Required) The port on the container to associate with the load balancer.
-
-## placement_strategy
-
-`placement_strategy` supports the following:
-
-* `type` - (Required) The type of placement strategy. Must be one of: `binpack`, `random`, or `spread`
-* `field` - (Optional) For the `spread` placement strategy, valid values are instanceId (or host,
- which has the same effect), or any platform or custom attribute that is applied to a container instance.
- For the `binpack` type, valid values are `memory` and `cpu`. For the `random` type, this attribute is not
- needed. For more information, see [Placement Strategy](http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_PlacementStrategy.html).
-
-## placement_constraints
-
-`placement_constraints` support the following:
-
-* `type` - (Required) The type of constraint. The only valid values at this time are `memberOf` and `distinctInstance`.
-* `expression` - (Optional) Cluster Query Language expression to apply to the constraint. Does not need to be specified
-for the `distinctInstance` type.
-For more information, see [Cluster Query Language in the Amazon EC2 Container
-Service Developer
-Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html).
-
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Amazon Resource Name (ARN) that identifies the service
-* `name` - The name of the service
-* `cluster` - The Amazon Resource Name (ARN) of the cluster on which the service runs
-* `iam_role` - The ARN of IAM role used for ELB
-* `desired_count` - The number of instances of the task definition
diff --git a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown b/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown
deleted file mode 100644
index fd40c9ef0..000000000
--- a/website/source/docs/providers/aws/r/ecs_task_definition.html.markdown
+++ /dev/null
@@ -1,109 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ecs_task_definition"
-sidebar_current: "docs-aws-resource-ecs-task-definition"
-description: |-
- Provides an ECS task definition.
----
-
-# aws\_ecs\_task\_definition
-
-Provides an ECS task definition to be used in `aws_ecs_service`.
-
-## Example Usage
-
-```hcl
-resource "aws_ecs_task_definition" "service" {
- family = "service"
- container_definitions = "${file("task-definitions/service.json")}"
-
- volume {
- name = "service-storage"
- host_path = "/ecs/service-storage"
- }
-
- placement_constraints {
- type = "memberOf"
- expression = "attribute:ecs.availability-zone in [us-west-2a, us-west-2b]"
- }
-}
-```
-
-The referenced `task-definitions/service.json` file contains a valid JSON document,
-which is shown below, and its content is going to be passed directly into the
-`container_definitions` attribute as a string. Please note that this example
-contains only a small subset of the available parameters.
-
-```
-[
- {
- "name": "first",
- "image": "service-first",
- "cpu": 10,
- "memory": 512,
- "essential": true,
- "portMappings": [
- {
- "containerPort": 80,
- "hostPort": 80
- }
- ]
- },
- {
- "name": "second",
- "image": "service-second",
- "cpu": 10,
- "memory": 256,
- "essential": true,
- "portMappings": [
- {
- "containerPort": 443,
- "hostPort": 443
- }
- ]
- }
-]
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `family` - (Required) A unique name for your task definition.
-* `container_definitions` - (Required) A list of valid [container definitions]
-(http://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_ContainerDefinition.html) provided as a
-single valid JSON document. Please note that you should only provide values that are part of the container
-definition document. For a detailed description of what parameters are available, see the [Task Definition Parameters]
-(https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definition_parameters.html) section from the
-official [Developer Guide](https://docs.aws.amazon.com/AmazonECS/latest/developerguide).
-* `task_role_arn` - (Optional) The ARN of IAM role that allows your Amazon ECS container task to make calls to other AWS services.
-* `network_mode` - (Optional) The Docker networking mode to use for the containers in the task. The valid values are `none`, `bridge`, and `host`.
-* `volume` - (Optional) A volume block. See below for details about what arguments are supported.
-* `placement_constraints` - (Optional) rules that are taken into consideration during task placement. Maximum number of
-`placement_constraints` is `10`. Defined below.
-
-Volume block supports the following arguments:
-
-* `name` - (Required) The name of the volume. This name is referenced in the `sourceVolume`
-parameter of container definition in the `mountPoints` section.
-* `host_path` - (Optional) The path on the host container instance that is presented to the container. If not set, ECS will create a nonpersistent data volume that starts empty and is deleted after the task has finished.
-
-## placement_constraints
-
-`placement_constraints` support the following:
-
-* `type` - (Required) The type of constraint. Use `memberOf` to restrict selection to a group of valid candidates.
-Note that `distinctInstance` is not supported in task definitions.
-* `expression` - (Optional) Cluster Query Language expression to apply to the constraint.
-For more information, see [Cluster Query Language in the Amazon EC2 Container
-Service Developer
-Guide](http://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html).
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - Full ARN of the Task Definition (including both `family` and `revision`).
-* `family` - The family of the Task Definition.
-* `revision` - The revision of the task in a particular family.
diff --git a/website/source/docs/providers/aws/r/efs_file_system.html.markdown b/website/source/docs/providers/aws/r/efs_file_system.html.markdown
deleted file mode 100644
index c65d1322d..000000000
--- a/website/source/docs/providers/aws/r/efs_file_system.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_efs_file_system"
-sidebar_current: "docs-aws-resource-efs-file-system"
-description: |-
- Provides an Elastic File System (EFS) resource.
----
-
-# aws\_efs\_file\_system
-
-Provides an Elastic File System (EFS) resource.
-
-## Example Usage
-
-```hcl
-resource "aws_efs_file_system" "foo" {
- creation_token = "my-product"
-
- tags {
- Name = "MyProduct"
- }
-}
-```
-
-## Argument Reference
-
-~> **NOTE:** The `reference_name` attribute has been deprecated and might
-be removed in future releases, please use `creation_token` instead.
-
-The following arguments are supported:
-
-* `creation_token` - (Optional) A unique name (a maximum of 64 characters are allowed)
-used as reference when creating the Elastic File System to ensure idempotent file
-system creation. By default generated by Terraform. See [Elastic File System]
-(http://docs.aws.amazon.com/efs/latest/ug/) user guide for more information.
-* `reference_name` - **DEPRECATED** (Optional) A reference name used when creating the
-`Creation Token` which Amazon EFS uses to ensure idempotent file system creation. By
-default generated by Terraform.
-* `performance_mode` - (Optional) The file system performance mode. Can be either
-`"generalPurpose"` or `"maxIO"` (Default: `"generalPurpose"`).
-* `tags` - (Optional) A mapping of tags to assign to the file system.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID that identifies the file system (e.g. fs-ccfc0d65).
-
-## Import
-
-The EFS file systems can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_efs_file_system.foo fs-6fa144c6
-```
diff --git a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown b/website/source/docs/providers/aws/r/efs_mount_target.html.markdown
deleted file mode 100644
index eb3f3562e..000000000
--- a/website/source/docs/providers/aws/r/efs_mount_target.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_efs_mount_target"
-sidebar_current: "docs-aws-resource-efs-mount-target"
-description: |-
- Provides an Elastic File System (EFS) mount target.
----
-
-# aws\_efs\_mount\_target
-
-Provides an Elastic File System (EFS) mount target.
-
-## Example Usage
-
-```hcl
-resource "aws_efs_mount_target" "alpha" {
- file_system_id = "${aws_efs_file_system.foo.id}"
- subnet_id = "${aws_subnet.alpha.id}"
-}
-
-resource "aws_vpc" "foo" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_subnet" "alpha" {
- vpc_id = "${aws_vpc.foo.id}"
- availability_zone = "us-west-2a"
- cidr_block = "10.0.1.0/24"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `file_system_id` - (Required) The ID of the file system for which the mount target is intended.
-* `subnet_id` - (Required) The ID of the subnet to add the mount target in.
-* `ip_address` - (Optional) The address (within the address range of the specified subnet) at
-which the file system may be mounted via the mount target.
-* `security_groups` - (Optional) A list of up to 5 VPC security group IDs (that must
-be for the same VPC as subnet specified) in effect for the mount target.
-
-## Attributes Reference
-
-~> **Note:** The `dns_name` attribute is only useful if the mount target is in a VPC that has
-support for DNS hostnames enabled. See [Using DNS with Your VPC](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-dns.html)
-and [VPC resource](https://www.terraform.io/docs/providers/aws/r/vpc.html#enable_dns_hostnames) in Terraform for more information.
-
-The following attributes are exported:
-
-* `id` - The ID of the mount target.
-* `dns_name` - The DNS name for the given subnet/AZ per [documented convention](http://docs.aws.amazon.com/efs/latest/ug/mounting-fs-mount-cmd-dns-name.html).
-* `network_interface_id` - The ID of the network interface that Amazon EFS created when it created the mount target.
-
-## Import
-
-The EFS mount targets can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_efs_mount_target.alpha fsmt-52a643fb
-```
diff --git a/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown b/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown
deleted file mode 100644
index e4238a25a..000000000
--- a/website/source/docs/providers/aws/r/egress_only_internet_gateway.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_egress_only_internet_gateway"
-sidebar_current: "docs-aws-resource-egress-only-internet-gateway"
-description: |-
- Provides a resource to create a VPC Egress Only Internet Gateway.
----
-
-# aws\_egress\_only\_internet\_gateway
-
-[IPv6 only] Creates an egress-only Internet gateway for your VPC.
-An egress-only Internet gateway is used to enable outbound communication
-over IPv6 from instances in your VPC to the Internet, and prevents hosts
-outside of your VPC from initiating an IPv6 connection with your instance.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "foo" {
- cidr_block = "10.1.0.0/16"
- assign_amazon_ipv6_cidr_block = true
-}
-
-resource "aws_egress_only_internet_gateway" "foo" {
- vpc_id = "${aws_vpc.foo.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The VPC ID to create in.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Egress Only Internet Gateway.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/eip.html.markdown b/website/source/docs/providers/aws/r/eip.html.markdown
deleted file mode 100644
index 1eb96a092..000000000
--- a/website/source/docs/providers/aws/r/eip.html.markdown
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_eip"
-sidebar_current: "docs-aws-resource-eip"
-description: |-
- Provides an Elastic IP resource.
----
-
-# aws\_eip
-
-Provides an Elastic IP resource.
-
-## Example Usage
-
-Single EIP associated with an instance:
-
-```hcl
-resource "aws_eip" "lb" {
- instance = "${aws_instance.web.id}"
- vpc = true
-}
-```
-
-Multiple EIPs associated with a single network interface:
-
-```hcl
-resource "aws_network_interface" "multi-ip" {
- subnet_id = "${aws_subnet.main.id}"
- private_ips = ["10.0.0.10", "10.0.0.11"]
-}
-
-resource "aws_eip" "one" {
- vpc = true
- network_interface = "${aws_network_interface.multi-ip.id}"
- associate_with_private_ip = "10.0.0.10"
-}
-
-resource "aws_eip" "two" {
- vpc = true
- network_interface = "${aws_network_interface.multi-ip.id}"
- associate_with_private_ip = "10.0.0.11"
-}
-```
-
-Attaching an EIP to an Instance with a pre-assigned private ip (VPC Only):
-
-```hcl
-resource "aws_vpc" "default" {
- cidr_block = "10.0.0.0/16"
- enable_dns_hostnames = true
-}
-
-resource "aws_internet_gateway" "gw" {
- vpc_id = "${aws_vpc.default.id}"
-}
-
-resource "aws_subnet" "tf_test_subnet" {
- vpc_id = "${aws_vpc.default.id}"
- cidr_block = "10.0.0.0/24"
- map_public_ip_on_launch = true
-
- depends_on = ["aws_internet_gateway.gw"]
-}
-
-resource "aws_instance" "foo" {
- # us-west-2
- ami = "ami-5189a661"
- instance_type = "t2.micro"
-
- private_ip = "10.0.0.12"
- subnet_id = "${aws_subnet.tf_test_subnet.id}"
-}
-
-resource "aws_eip" "bar" {
- vpc = true
-
- instance = "${aws_instance.foo.id}"
- associate_with_private_ip = "10.0.0.12"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc` - (Optional) Boolean if the EIP is in a VPC or not.
-* `instance` - (Optional) EC2 instance ID.
-* `network_interface` - (Optional) Network interface ID to associate with.
-* `associate_with_private_ip` - (Optional) A user specified primary or secondary private IP address to
- associate with the Elastic IP address. If no private IP address is specified,
- the Elastic IP address is associated with the primary private IP address.
-
-~> **NOTE:** You can specify either the `instance` ID or the `network_interface` ID,
-but not both. Including both will **not** return an error from the AWS API, but will
-have undefined behavior. See the relevant [AssociateAddress API Call][1] for
-more information.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - Contains the EIP allocation ID.
-* `private_ip` - Contains the private IP address (if in VPC).
-* `associate_with_private_ip` - Contains the user specified private IP address
-(if in VPC).
-* `public_ip` - Contains the public IP address.
-* `instance` - Contains the ID of the attached instance.
-* `network_interface` - Contains the ID of the attached network interface.
-
-
-## Import
-
-EIPs in a VPC can be imported using their Allocation ID, e.g.
-
-```
-$ terraform import aws_eip.bar eipalloc-00a10e96
-```
-
-EIPs in EC2 Classic can be imported using their Public IP, e.g.
-
-```
-$ terraform import aws_eip.bar 52.0.0.0
-```
-
-[1]: https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_AssociateAddress.html
diff --git a/website/source/docs/providers/aws/r/eip_association.html.markdown b/website/source/docs/providers/aws/r/eip_association.html.markdown
deleted file mode 100644
index fe473b51f..000000000
--- a/website/source/docs/providers/aws/r/eip_association.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_eip_association"
-sidebar_current: "docs-aws-resource-eip-association"
-description: |-
- Provides an AWS EIP Association
----
-
-# aws\_eip\_association
-
-Provides an AWS EIP Association as a top level resource, to associate and
-disassociate Elastic IPs from AWS Instances and Network Interfaces.
-
-~> **NOTE:** `aws_eip_association` is useful in scenarios where EIPs are either
-pre-existing or distributed to customers or users and therefore cannot be changed.
-
-## Example Usage
-
-```hcl
-resource "aws_eip_association" "eip_assoc" {
- instance_id = "${aws_instance.web.id}"
- allocation_id = "${aws_eip.example.id}"
-}
-
-resource "aws_instance" "web" {
- ami = "ami-21f78e11"
- availability_zone = "us-west-2a"
- instance_type = "t1.micro"
-
- tags {
- Name = "HelloWorld"
- }
-}
-
-resource "aws_eip" "example" {
- vpc = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allocation_id` - (Optional) The allocation ID. This is required for EC2-VPC.
-* `allow_reassociation` - (Optional, Boolean) Whether to allow an Elastic IP to
-be re-associated. Defaults to `true` in VPC.
-* `instance_id` - (Optional) The ID of the instance. This is required for
-EC2-Classic. For EC2-VPC, you can specify either the instance ID or the
-network interface ID, but not both. The operation fails if you specify an
-instance ID unless exactly one network interface is attached.
-* `network_interface_id` - (Optional) The ID of the network interface. If the
-instance has more than one network interface, you must specify a network
-interface ID.
-* `private_ip_address` - (Optional) The primary or secondary private IP address
-to associate with the Elastic IP address. If no private IP address is
-specified, the Elastic IP address is associated with the primary private IP
-address.
-* `public_ip` - (Optional) The Elastic IP address. This is required for EC2-Classic.
-
-## Attributes Reference
-
-* `association_id` - The ID that represents the association of the Elastic IP
-address with an instance.
-* `allocation_id` - As above
-* `instance_id` - As above
-* `network_interface_id` - As above
-* `private_ip_address` - As above
-* `public_ip` - As above
diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown
deleted file mode 100644
index d8437fff3..000000000
--- a/website/source/docs/providers/aws/r/elastic_beanstalk_application.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastic_beanstalk_application"
-sidebar_current: "docs-aws-resource-elastic-beanstalk-application"
-description: |-
- Provides an Elastic Beanstalk Application Resource
----
-
-# aws\_elastic\_beanstalk\_application
-
-Provides an Elastic Beanstalk Application Resource. Elastic Beanstalk allows
-you to deploy and manage applications in the AWS cloud without worrying about
-the infrastructure that runs those applications.
-
-This resource creates an application that has one configuration template named
-`default`, and no application versions
-
-## Example Usage
-
-```hcl
-resource "aws_elastic_beanstalk_application" "tftest" {
- name = "tf-test-name"
- description = "tf-test-desc"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the application, must be unique within your account
-* `description` - (Optional) Short description of the application
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name`
-* `description`
-
-
-## Import
-
-Elastic Beanstalk Applications can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_elastic_beanstalk_application.tf_test tf-test-name
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown
deleted file mode 100644
index 5cf76b849..000000000
--- a/website/source/docs/providers/aws/r/elastic_beanstalk_application_version.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastic_beanstalk_application_version"
-sidebar_current: "docs-aws-resource-elastic-beanstalk-application-version"
-description: |-
- Provides an Elastic Beanstalk Application Version Resource
----
-
-# aws\_elastic\_beanstalk\_application\_version
-
-Provides an Elastic Beanstalk Application Version Resource. Elastic Beanstalk allows
-you to deploy and manage applications in the AWS cloud without worrying about
-the infrastructure that runs those applications.
-
-This resource creates a Beanstalk Application Version that can be deployed to a Beanstalk
-Environment.
-
-~> **NOTE on Application Version Resource:** When using the Application Version resource with multiple
-[Elastic Beanstalk Environments](elastic_beanstalk_environment.html) it is possible that an error may be returned
-when attempting to delete an Application Version while it is still in use by a different environment.
-To work around this you can:
-
-
Create each environment in a separate AWS account
-
Create your `aws_elastic_beanstalk_application_version` resources with unique names in your
-Elastic Beanstalk Application. For example <revision>-<environment>.
-
-
-## Example Usage
-
-```hcl
-resource "aws_s3_bucket" "default" {
- bucket = "tftest.applicationversion.bucket"
-}
-
-resource "aws_s3_bucket_object" "default" {
- bucket = "${aws_s3_bucket.default.id}"
- key = "beanstalk/go-v1.zip"
- source = "go-v1.zip"
-}
-
-resource "aws_elastic_beanstalk_application" "default" {
- name = "tf-test-name"
- description = "tf-test-desc"
-}
-
-resource "aws_elastic_beanstalk_application_version" "default" {
- name = "tf-test-version-label"
- application = "tf-test-name"
- description = "application version created by terraform"
- bucket = "${aws_s3_bucket.default.id}"
- key = "${aws_s3_bucket_object.default.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for this Application Version.
-* `application` - (Required) Name of the Beanstalk Application the version is associated with.
-* `description` - (Optional) Short description of the Application Version.
-* `bucket` - (Required) S3 bucket that contains the Application Version source bundle.
-* `key` - (Required) S3 object that is the Application Version source bundle.
-* `force_delete` - (Optional) On delete, force an Application Version to be deleted when it may be in use
- by multiple Elastic Beanstalk Environments.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The Application Version name.
diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown
deleted file mode 100644
index 37b0bc775..000000000
--- a/website/source/docs/providers/aws/r/elastic_beanstalk_configuration_template.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastic_beanstalk_configuration_template"
-sidebar_current: "docs-aws-resource-elastic-beanstalk-configuration-template"
-description: |-
- Provides an Elastic Beanstalk Configuration Template
----
-
-# aws\_elastic\_beanstalk\_configuration\_template
-
-Provides an Elastic Beanstalk Configuration Template, which are associated with
-a specific application and are used to deploy different versions of the
-application with the same configuration settings.
-
-## Example Usage
-
-```hcl
-resource "aws_elastic_beanstalk_application" "tftest" {
- name = "tf-test-name"
- description = "tf-test-desc"
-}
-
-resource "aws_elastic_beanstalk_configuration_template" "tf_template" {
- name = "tf-test-template-config"
- application = "${aws_elastic_beanstalk_application.tftest.name}"
- solution_stack_name = "64bit Amazon Linux 2015.09 v2.0.8 running Go 1.4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for this Template.
-* `application` – (Required) name of the application to associate with this configuration template
-* `description` - (Optional) Short description of the Template
-* `environment_id` – (Optional) The ID of the environment used with this configuration template
-* `setting` – (Optional) Option settings to configure the new Environment. These
- override specific values that are set as defaults. The format is detailed
- below in [Option Settings](#option-settings)
-* `solution_stack_name` – (Optional) A solution stack to base your Template
-off of. Example stacks can be found in the [Amazon API documentation][1]
-
-
-## Option Settings
-
-The `setting` field supports the following format:
-
-* `namespace` - unique namespace identifying the option's associated AWS resource
-* `name` - name of the configuration option
-* `value` - value for the configuration option
-* `resource` - (Optional) resource name for [scheduled action](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html#command-options-general-autoscalingscheduledaction)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name`
-* `application`
-* `description`
-* `environment_id`
-* `option_settings`
-* `solution_stack_name`
-
-[1]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html
-
-
diff --git a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown b/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown
deleted file mode 100644
index 3a8117ad1..000000000
--- a/website/source/docs/providers/aws/r/elastic_beanstalk_environment.html.markdown
+++ /dev/null
@@ -1,139 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastic_beanstalk_environment"
-sidebar_current: "docs-aws-resource-elastic-beanstalk-environment"
-description: |-
- Provides an Elastic Beanstalk Environment Resource
----
-
-# aws\_elastic\_beanstalk\_environment
-
-Provides an Elastic Beanstalk Environment Resource. Elastic Beanstalk allows
-you to deploy and manage applications in the AWS cloud without worrying about
-the infrastructure that runs those applications.
-
-Environments are often things such as `development`, `integration`, or
-`production`.
-
-## Example Usage
-
-```hcl
-resource "aws_elastic_beanstalk_application" "tftest" {
- name = "tf-test-name"
- description = "tf-test-desc"
-}
-
-resource "aws_elastic_beanstalk_environment" "tfenvtest" {
- name = "tf-test-name"
- application = "${aws_elastic_beanstalk_application.tftest.name}"
- solution_stack_name = "64bit Amazon Linux 2015.03 v2.0.3 running Go 1.4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for this Environment. This name is used
- in the application URL
-* `application` – (Required) Name of the application that contains the version
- to be deployed
-* `cname_prefix` - (Optional) Prefix to use for the fully qualified DNS name of
- the Environment.
-* `description` - (Optional) Short description of the Environment
-* `tier` - (Optional) Elastic Beanstalk Environment tier. Valid values are `Worker`
- or `WebServer`. If tier is left blank `WebServer` will be used.
-* `setting` – (Optional) Option settings to configure the new Environment. These
- override specific values that are set as defaults. The format is detailed
- below in [Option Settings](#option-settings)
-* `solution_stack_name` – (Optional) A solution stack to base your environment
-off of. Example stacks can be found in the [Amazon API documentation][1]
-* `template_name` – (Optional) The name of the Elastic Beanstalk Configuration
- template to use in deployment
-* `wait_for_ready_timeout` - (Default: `20m`) The maximum
- [duration](https://golang.org/pkg/time/#ParseDuration) that Terraform should
- wait for an Elastic Beanstalk Environment to be in a ready state before timing
- out.
-* `poll_interval` – The time between polling the AWS API to
-check if changes have been applied. Use this to adjust the rate of API calls
-for any `create` or `update` action. Minimum `10s`, maximum `180s`. Omit this to
-use the default behavior, which is an exponential backoff
-* `version_label` - (Optional) The name of the Elastic Beanstalk Application Version
-to use in deployment.
-* `tags` – (Optional) A set of tags to apply to the Environment. **Note:** at
-this time the Elastic Beanstalk API does not provide a programmatic way of
-changing these tags after initial application
-
-
-## Option Settings
-
-Some options can be stack-specific, check [AWS Docs](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html)
-for supported options and examples.
-
-The `setting` and `all_settings` mappings support the following format:
-
-* `namespace` - unique namespace identifying the option's associated AWS resource
-* `name` - name of the configuration option
-* `value` - value for the configuration option
-* `resource` - (Optional) resource name for [scheduled action](https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/command-options-general.html#command-options-general-autoscalingscheduledaction)
-
-### Example With Options
-
-```hcl
-resource "aws_elastic_beanstalk_application" "tftest" {
- name = "tf-test-name"
- description = "tf-test-desc"
-}
-
-resource "aws_elastic_beanstalk_environment" "tfenvtest" {
- name = "tf-test-name"
- application = "${aws_elastic_beanstalk_application.tftest.name}"
- solution_stack_name = "64bit Amazon Linux 2015.03 v2.0.3 running Go 1.4"
-
- setting {
- namespace = "aws:ec2:vpc"
- name = "VPCId"
- value = "vpc-xxxxxxxx"
- }
-
- setting {
- namespace = "aws:ec2:vpc"
- name = "Subnets"
- value = "subnet-xxxxxxxx"
- }
-}
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the Elastic Beanstalk Environment.
-* `name` - Name of the Elastic Beanstalk Environment.
-* `description` - Description of the Elastic Beanstalk Environment.
-* `tier` - The environment tier specified.
-* `application` – The Elastic Beanstalk Application specified for this environment.
-* `setting` – Settings specifically set for this Environment.
-* `all_settings` – List of all option settings configured in the Environment. These
- are a combination of default settings and their overrides from `setting` in
- the configuration.
-* `cname` - Fully qualified DNS name for the Environment.
-* `autoscaling_groups` - The autoscaling groups used by this environment.
-* `instances` - Instances used by this environment.
-* `launch_configurations` - Launch configurations in use by this environment.
-* `load_balancers` - Elastic load balancers in use by this environment.
-* `queues` - SQS queues in use by this environment.
-* `triggers` - Autoscaling triggers in use by this environment.
-
-
-
-[1]: https://docs.aws.amazon.com/elasticbeanstalk/latest/dg/concepts.platforms.html
-
-
-## Import
-
-Elastic Beanstalk Environments can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_elastic_beanstalk_environment.prodenv e-rpqsewtp2j
-```
diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown
deleted file mode 100644
index dfa1c60a7..000000000
--- a/website/source/docs/providers/aws/r/elastic_transcoder_pipeline.html.markdown
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastictranscoder_pipeline"
-sidebar_current: "docs-aws-resource-elastic-transcoder-pipeline"
-description: |-
- Provides an Elastic Transcoder pipeline resource.
----
-
-# aws\_elastictranscoder\_pipeline
-
-Provides an Elastic Transcoder pipeline resource.
-
-## Example Usage
-
-```hcl
-resource "aws_elastictranscoder_pipeline" "bar" {
- input_bucket = "${aws_s3_bucket.input_bucket.bucket}"
- name = "aws_elastictranscoder_pipeline_tf_test_"
- role = "${aws_iam_role.test_role.arn}"
-
- content_config = {
- bucket = "${aws_s3_bucket.content_bucket.bucket}"
- storage_class = "Standard"
- }
-
- thumbnail_config = {
- bucket = "${aws_s3_bucket.thumb_bucket.bucket}"
- storage_class = "Standard"
- }
-}
-```
-
-## Argument Reference
-
-See ["Create Pipeline"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-pipeline.html) in the AWS docs for reference.
-
-The following arguments are supported:
-
-* `aws_kms_key_arn` - (Optional) The AWS Key Management Service (AWS KMS) key that you want to use with this pipeline.
-* `content_config` - (Optional) The ContentConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists. (documented below)
-* `content_config_permissions` - (Optional) The permissions for the `content_config` object. (documented below)
-* `input_bucket` - (Required) The Amazon S3 bucket in which you saved the media files that you want to transcode and the graphics that you want to use as watermarks.
-* `name` - (Optional, Forces new resource) The name of the pipeline. Maximum 40 characters
-* `notifications` - (Optional) The Amazon Simple Notification Service (Amazon SNS) topic that you want to notify to report job status. (documented below)
-* `output_bucket` - (Optional) The Amazon S3 bucket in which you want Elastic Transcoder to save the transcoded files.
-* `role` - (Required) The IAM Amazon Resource Name (ARN) for the role that you want Elastic Transcoder to use to transcode jobs for this pipeline.
-* `thumbnail_config` - (Optional) The ThumbnailConfig object specifies information about the Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files. (documented below)
-* `thumbnail_config_permissions` - (Optional) The permissions for the `thumbnail_config` object. (documented below)
-
-The `content_config` object specifies information about the Amazon S3 bucket in
-which you want Elastic Transcoder to save transcoded files and playlists: which
-bucket to use, and the storage class that you want to assign to the files. If
-you specify values for `content_config`, you must also specify values for
-`thumbnail_config`. If you specify values for `content_config` and
-`thumbnail_config`, omit the `output_bucket` object.
-
-The `content_config` object supports the following:
-
-* `bucket` - The Amazon S3 bucket in which you want Elastic Transcoder to save transcoded files and playlists.
-* `storage_class` - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the files and playlists that it stores in your Amazon S3 bucket.
-
-The `content_config_permissions` object supports the following:
-
-* `access` - The permission that you want to give to the AWS user that you specified in `content_config_permissions.grantee`
-* `grantee` - The AWS user or group that you want to have access to transcoded files and playlists.
-* `grantee_type` - Specify the type of value that appears in the `content_config_permissions.grantee` object. Valid values are `Canonical`, `Email` or `Group`.
-
-
-The `notifications` object supports the following:
-
-* `completed` - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder has finished processing a job in this pipeline.
-* `error` - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters an error condition while processing a job in this pipeline.
-* `progressing` - The topic ARN for the Amazon Simple Notification Service (Amazon SNS) topic that you want to notify when Elastic Transcoder has started to process a job in this pipeline.
-* `warning` - The topic ARN for the Amazon SNS topic that you want to notify when Elastic Transcoder encounters a warning condition while processing a job in this pipeline.
-
-The `thumbnail_config` object specifies information about the Amazon S3 bucket in
-which you want Elastic Transcoder to save thumbnail files: which bucket to use,
-which users you want to have access to the files, the type of access you want
-users to have, and the storage class that you want to assign to the files. If
-you specify values for `content_config`, you must also specify values for
-`thumbnail_config` even if you don't want to create thumbnails. (You control
-whether to create thumbnails when you create a job. For more information, see
-ThumbnailPattern in the topic Create Job.) If you specify values for
-`content_config` and `thumbnail_config`, omit the OutputBucket object.
-
-The `thumbnail_config` object supports the following:
-
-* `bucket` - The Amazon S3 bucket in which you want Elastic Transcoder to save thumbnail files.
-* `storage_class` - The Amazon S3 storage class, Standard or ReducedRedundancy, that you want Elastic Transcoder to assign to the thumbnails that it stores in your Amazon S3 bucket.
-
-The `thumbnail_config_permissions` object supports the following:
-
-* `access` - The permission that you want to give to the AWS user that you specified in `thumbnail_config_permissions.grantee`.
-* `grantee` - The AWS user or group that you want to have access to thumbnail files.
-* `grantee_type` - Specify the type of value that appears in the `thumbnail_config_permissions.grantee` object.
diff --git a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown b/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown
deleted file mode 100644
index 930dc8fce..000000000
--- a/website/source/docs/providers/aws/r/elastic_transcoder_preset.html.markdown
+++ /dev/null
@@ -1,160 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elastictranscoder_preset"
-sidebar_current: "docs-aws-resource-elastic-transcoder-preset"
-description: |-
- Provides an Elastic Transcoder preset resource.
----
-
-# aws\_elastictranscoder\_preset
-
-Provides an Elastic Transcoder preset resource.
-
-## Example Usage
-
-```hcl
-resource "aws_elastictranscoder_preset" "bar" {
- container = "mp4"
- description = "Sample Preset"
- name = "sample_preset"
-
- audio = {
- audio_packing_mode = "SingleTrack"
- bit_rate = 96
- channels = 2
- codec = "AAC"
- sample_rate = 44100
- }
-
- audio_codec_options = {
- profile = "AAC-LC"
- }
-
- video = {
- bit_rate = "1600"
- codec = "H.264"
- display_aspect_ratio = "16:9"
- fixed_gop = "false"
- frame_rate = "auto"
- max_frame_rate = "60"
- keyframes_max_dist = 240
- max_height = "auto"
- max_width = "auto"
- padding_policy = "Pad"
- sizing_policy = "Fit"
- }
-
- video_codec_options = {
- Profile = "main"
- Level = "2.2"
- MaxReferenceFrames = 3
- InterlaceMode = "Progressive"
- ColorSpaceConversionMode = "None"
- }
-
- video_watermarks = {
- id = "Terraform Test"
- max_width = "20%"
- max_height = "20%"
- sizing_policy = "ShrinkToFit"
- horizontal_align = "Right"
- horizontal_offset = "10px"
- vertical_align = "Bottom"
- vertical_offset = "10px"
- opacity = "55.5"
- target = "Content"
- }
-
- thumbnails = {
- format = "png"
- interval = 120
- max_width = "auto"
- max_height = "auto"
- padding_policy = "Pad"
- sizing_policy = "Fit"
- }
-}
-```
-
-## Argument Reference
-
-See ["Create Preset"](http://docs.aws.amazon.com/elastictranscoder/latest/developerguide/create-preset.html) in the AWS docs for reference.
-
-The following arguments are supported:
-
-* `audio` - (Optional, Forces new resource) Audio parameters object (documented below).
-* `audio_codec_options` - (Optional, Forces new resource) Codec options for the audio parameters (documented below)
-* `container` - (Required, Forces new resource) The container type for the output file. Valid values are `flac`, `flv`, `fmp4`, `gif`, `mp3`, `mp4`, `mpg`, `mxf`, `oga`, `ogg`, `ts`, and `webm`.
-* `description` - (Optional, Forces new resource) A description of the preset (maximum 255 characters)
-* `name` - (Optional, Forces new resource) The name of the preset. (maximum 40 characters)
-* `thumbnails` - (Optional, Forces new resource) Thumbnail parameters object (documented below)
-* `video` - (Optional, Forces new resource) Video parameters object (documented below)
-* `video_watermarks` - (Optional, Forces new resource) Watermark parameters for the video parameters (documented below)
-* `video_codec_options` (Optional, Forces new resource) Codec options for the video parameters
-
-The `audio` object supports the following:
-
-* `audio_packing_mode` - The method of organizing audio channels and tracks. Use Audio:Channels to specify the number of channels in your output, and Audio:AudioPackingMode to specify the number of tracks and their relation to the channels. If you do not specify an Audio:AudioPackingMode, Elastic Transcoder uses SingleTrack.
-* `bit_rate` - The bit rate of the audio stream in the output file, in kilobits/second. Enter an integer between 64 and 320, inclusive.
-* `channels` - The number of audio channels in the output file
-* `codec` - The audio codec for the output file. Valid values are `AAC`, `flac`, `mp2`, `mp3`, `pcm`, and `vorbis`.
-* `sample_rate` - The sample rate of the audio stream in the output file, in hertz. Valid values are: `auto`, `22050`, `32000`, `44100`, `48000`, `96000`
-
-The `audio_codec_options` object supports the following:
-
-* `bit_depth` - The bit depth of a sample is how many bits of information are included in the audio samples. Valid values are `16` and `24`. (FLAC/PCM Only)
-* `bit_order` - The order the bits of a PCM sample are stored in. The supported value is LittleEndian. (PCM Only)
-* `profile` - If you specified AAC for Audio:Codec, choose the AAC profile for the output file.
-* `signed` - Whether audio samples are represented with negative and positive numbers (signed) or only positive numbers (unsigned). The supported value is Signed. (PCM Only)
-
-The `thumbnails` object supports the following:
-
-* `aspect_ratio` - The aspect ratio of thumbnails. The following values are valid: auto, 1:1, 4:3, 3:2, 16:9
-* `format` - The format of thumbnails, if any. Valid formats are jpg and png.
-* `interval` - The approximate number of seconds between thumbnails. The value must be an integer. The actual interval can vary by several seconds from one thumbnail to the next.
-* `max_height` - The maximum height of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 3072, inclusive.
-* `max_width` - The maximum width of thumbnails, in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 32 and 4096, inclusive.
-* `padding_policy` - When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of thumbnails to make the total size of the thumbnails match the values that you specified for thumbnail MaxWidth and MaxHeight settings.
-* `resolution` - The width and height of thumbnail files in pixels, in the format WidthxHeight, where both values are even integers. The values cannot exceed the width and height that you specified in the Video:Resolution object. (To better control resolution and aspect ratio of thumbnails, we recommend that you use the thumbnail values `max_width`, `max_height`, `sizing_policy`, and `padding_policy` instead of `resolution` and `aspect_ratio`. The two groups of settings are mutually exclusive. Do not use them together)
-* `sizing_policy` - A value that controls scaling of thumbnails. Valid values are: `Fit`, `Fill`, `Stretch`, `Keep`, `ShrinkToFit`, and `ShrinkToFill`.
-
-The `video` object supports the following:
-
-* `aspect_ratio` - The display aspect ratio of the video in the output file. Valid values are: `auto`, `1:1`, `4:3`, `3:2`, `16:9`. (Note; to better control resolution and aspect ratio of output videos, we recommend that you use the values `max_width`, `max_height`, `sizing_policy`, `padding_policy`, and `display_aspect_ratio` instead of `resolution` and `aspect_ratio`.)
-* `bit_rate` - The bit rate of the video stream in the output file, in kilobits/second. You can configure variable bit rate or constant bit rate encoding.
-* `codec` - The video codec for the output file. Valid values are `gif`, `H.264`, `mpeg2`, `vp8`, and `vp9`.
-* `display_aspect_ratio` - The value that Elastic Transcoder adds to the metadata in the output file. If you set DisplayAspectRatio to auto, Elastic Transcoder chooses an aspect ratio that ensures square pixels. If you specify another option, Elastic Transcoder sets that value in the output file.
-* `fixed_gop` - Whether to use a fixed value for Video:FixedGOP. Not applicable for containers of type gif. Valid values are true and false.
-* `frame_rate` - The frames per second for the video stream in the output file. The following values are valid: `auto`, `10`, `15`, `23.97`, `24`, `25`, `29.97`, `30`, `50`, `60`.
-* `keyframes_max_dist` - The maximum number of frames between key frames. Not applicable for containers of type gif.
-* `max_frame_rate` - If you specify auto for FrameRate, Elastic Transcoder uses the frame rate of the input video for the frame rate of the output video, up to the maximum frame rate. If you do not specify a MaxFrameRate, Elastic Transcoder will use a default of 30.
-* `max_height` - The maximum height of the output video in pixels. If you specify auto, Elastic Transcoder uses 1080 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 96 and 3072, inclusive.
-* `max_width` - The maximum width of the output video in pixels. If you specify auto, Elastic Transcoder uses 1920 (Full HD) as the default value. If you specify a numeric value, enter an even integer between 128 and 4096, inclusive.
-* `padding_policy` - When you set PaddingPolicy to Pad, Elastic Transcoder might add black bars to the top and bottom and/or left and right sides of the output video to make the total size of the output video match the values that you specified for `max_width` and `max_height`.
-* `resolution` - The width and height of the video in the output file, in pixels. Valid values are `auto` and `widthxheight`. (see note for `aspect_ratio`)
-* `sizing_policy` - A value that controls scaling of the output video. Valid values are: `Fit`, `Fill`, `Stretch`, `Keep`, `ShrinkToFit`, `ShrinkToFill`.
-
-The `video_watermarks` object supports the following:
-
-* `horizontal_align` - The horizontal position of the watermark unless you specify a nonzero value for `horizontal_offset`.
-* `horizontal_offset` - The amount by which you want the horizontal position of the watermark to be offset from the position specified by `horizontal_align`.
-* `id` - A unique identifier for the settings for one watermark. The value of Id can be up to 40 characters long. You can specify settings for up to four watermarks.
-* `max_height` - The maximum height of the watermark.
-* `max_width` - The maximum width of the watermark.
-* `opacity` - A percentage that indicates how much you want a watermark to obscure the video in the location where it appears.
-* `sizing_policy` - A value that controls scaling of the watermark. Valid values are: `Fit`, `Stretch`, `ShrinkToFit`
-* `target` - A value that determines how Elastic Transcoder interprets values that you specified for `video_watermarks.horizontal_offset`, `video_watermarks.vertical_offset`, `video_watermarks.max_width`, and `video_watermarks.max_height`. Valid values are `Content` and `Frame`.
-* `vertical_align` - The vertical position of the watermark unless you specify a nonzero value for `vertical_offset`. Valid values are `Top`, `Bottom`, `Center`.
-* `vertical_offset` - The amount by which you want the vertical position of the watermark to be offset from the position specified by `vertical_align`
-
-The `video_codec_options` map supports the following:
-
-* `Profile` - The codec profile that you want to use for the output file. (H.264/VP8 Only)
-* `Level` - The H.264 level that you want to use for the output file. Elastic Transcoder supports the following levels: `1`, `1b`, `1.1`, `1.2`, `1.3`, `2`, `2.1`, `2.2`, `3`, `3.1`, `3.2`, `4`, `4.1` (H.264 only)
-* `MaxReferenceFrames` - The maximum number of previously decoded frames to use as a reference for decoding future frames. Valid values are integers 0 through 16. (H.264 only)
-* `MaxBitRate` - The maximum number of kilobits per second in the output video. Specify a value between 16 and 62,500 inclusive, or `auto`. (Optional, H.264/MPEG2/VP8/VP9 only)
-* `BufferSize` - The maximum number of kilobits in any x seconds of the output video. This window is commonly 10 seconds, the standard segment duration when you're using ts for the container type of the output video. Specify an integer greater than 0. If you specify MaxBitRate and omit BufferSize, Elastic Transcoder sets BufferSize to 10 times the value of MaxBitRate. (Optional, H.264/MPEG2/VP8/VP9 only)
-* `InterlaceMode` - The interlace mode for the output video. (Optional, H.264/MPEG2 Only)
-* `ColorSpaceConversionMode` - The color space conversion Elastic Transcoder applies to the output video. Valid values are `None`, `Bt709toBt601`, `Bt601toBt709`, and `Auto`. (Optional, H.264/MPEG2 Only)
-* `ChromaSubsampling` - The sampling pattern for the chroma (color) channels of the output video. Valid values are `yuv420p` and `yuv422p`.
-* `LoopCount` - The number of times you want the output gif to loop (Gif only)
diff --git a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown b/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
deleted file mode 100644
index d9c5637a0..000000000
--- a/website/source/docs/providers/aws/r/elasticache_cluster.html.markdown
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_cluster"
-sidebar_current: "docs-aws-resource-elasticache-cluster"
-description: |-
- Provides an ElastiCache Cluster resource.
----
-
-# aws\_elasticache\_cluster
-
-Provides an ElastiCache Cluster resource.
-
-Changes to a Cache Cluster can occur when you manually change a
-parameter, such as `node_type`, and are reflected in the next maintenance
-window. Because of this, Terraform may report a difference in its planning
-phase because a modification has not yet taken place. You can use the
-`apply_immediately` flag to instruct the service to apply the change immediately
-(see documentation below).
-
-~> **Note:** using `apply_immediately` can result in a
-brief downtime as the server reboots. See the AWS Docs on
-[Modifying an ElastiCache Cache Cluster][2] for more information.
-
-## Example Usage
-
-```hcl
-resource "aws_elasticache_cluster" "bar" {
- cluster_id = "cluster-example"
- engine = "memcached"
- node_type = "cache.t2.micro"
- port = 11211
- num_cache_nodes = 1
- parameter_group_name = "default.memcached1.4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cluster_id` – (Required) Group identifier. ElastiCache converts
- this name to lowercase
-
-* `engine` – (Required) Name of the cache engine to be used for this cache cluster.
- Valid values for this parameter are `memcached` or `redis`
-
-* `engine_version` – (Optional) Version number of the cache engine to be used.
-See [Selecting a Cache Engine and Version](https://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/SelectEngine.html)
-in the AWS Documentation center for supported versions
-
-* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance
-on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
-The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
-
-* `node_type` – (Required) The compute and memory capacity of the nodes. See
-[Available Cache Node Types](https://aws.amazon.com/elasticache/details#Available_Cache_Node_Types) for
-supported node types
-
-* `num_cache_nodes` – (Required) The initial number of cache nodes that the
-cache cluster will have. For Redis, this value must be 1. For Memcache, this
-value must be between 1 and 20. If this number is reduced on subsequent runs,
-the highest numbered nodes will be removed.
-
-* `parameter_group_name` – (Required) Name of the parameter group to associate
-with this cache cluster
-
-* `port` – (Required) The port number on which each of the cache nodes will
-accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
-
-* `subnet_group_name` – (Optional, VPC only) Name of the subnet group to be used
-for the cache cluster.
-
-* `security_group_names` – (Optional, EC2 Classic only) List of security group
-names to associate with this cache cluster
-
-* `security_group_ids` – (Optional, VPC only) One or more VPC security groups associated
- with the cache cluster
-
-* `apply_immediately` - (Optional) Specifies whether any database modifications
- are applied immediately, or during the next maintenance window. Default is
- `false`. See [Amazon ElastiCache Documentation for more information.][1]
- (Available since v0.6.0)
-
-* `snapshot_arns` – (Optional) A single-element string list containing an
-Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
-Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
-
-* `snapshot_name` - (Optional) The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
-
-* `snapshot_window` - (Optional, Redis only) The daily time range (in UTC) during which ElastiCache will
-begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
-
-* `snapshot_retention_limit` - (Optional, Redis only) The number of days for which ElastiCache will
-retain automatic cache cluster snapshots before deleting them. For example, if you set
-SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
-before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
-Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
-
-* `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an
-SNS topic to send ElastiCache notifications to. Example:
-`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
-
-* `az_mode` - (Optional, Memcached only) Specifies whether the nodes in this Memcached node group are created in a single Availability Zone or created across multiple Availability Zones in the cluster's region. Valid values for this parameter are `single-az` or `cross-az`, default is `single-az`. If you want to choose `cross-az`, `num_cache_nodes` must be greater than `1`
-
-* `availability_zone` - (Optional) The Availability Zone for the cache cluster. If you want to create cache nodes in multi-az, use `availability_zones`
-
-* `availability_zones` - (Optional, Memcached only) List of Availability Zones in which the cache nodes will be created. If you want to create cache nodes in single-az, use `availability_zone`
-
-* `tags` - (Optional) A mapping of tags to assign to the resource
-
-~> **NOTE:** Snapshotting functionality is not compatible with t2 instance types.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cache_nodes` - List of node objects including `id`, `address`, `port` and `availability_zone`.
- Referenceable e.g. as `${aws_elasticache_cluster.bar.cache_nodes.0.address}`
-
-* `configuration_endpoint` - (Memcached only) The configuration endpoint to allow host discovery.
-* `cluster_address` - (Memcached only) The DNS name of the cache cluster without the port appended.
-
-[1]: https://docs.aws.amazon.com/AmazonElastiCache/latest/APIReference/API_ModifyCacheCluster.html
-[2]: https://docs.aws.amazon.com/AmazonElastiCache/latest/UserGuide/Clusters.Modify.html
-
-
-## Import
-
-ElastiCache Clusters can be imported using the `cluster_id`, e.g.
-
-```
-$ terraform import aws_elasticache_cluster.my_cluster my_cluster
-```
diff --git a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown
deleted file mode 100644
index b322aaa12..000000000
--- a/website/source/docs/providers/aws/r/elasticache_parameter_group.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_parameter_group"
-sidebar_current: "docs-aws-resource-elasticache-parameter-group"
----
-
-# aws\_elasticache\_parameter\_group
-
-Provides an ElastiCache parameter group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_elasticache_parameter_group" "default" {
- name = "cache-params"
- family = "redis2.8"
-
- parameter {
- name = "activerehashing"
- value = "yes"
- }
-
- parameter {
- name = "min-slaves-to-write"
- value = "2"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ElastiCache parameter group.
-* `family` - (Required) The family of the ElastiCache parameter group.
-* `description` - (Optional) The description of the ElastiCache parameter group. Defaults to "Managed by Terraform".
-* `parameter` - (Optional) A list of ElastiCache parameters to apply.
-
-Parameter blocks support the following:
-
-* `name` - (Required) The name of the ElastiCache parameter.
-* `value` - (Required) The value of the ElastiCache parameter.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ElastiCache parameter group name.
-
-
-## Import
-
-ElastiCache Parameter Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_elasticache_parameter_group.default redis-params
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown
deleted file mode 100644
index c2175b490..000000000
--- a/website/source/docs/providers/aws/r/elasticache_replication_group.html.markdown
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_replication_group"
-sidebar_current: "docs-aws-resource-elasticache-replication-group"
-description: |-
- Provides an ElastiCache Replication Group resource.
----
-
-# aws\_elasticache\_replication\_group
-
-Provides an ElastiCache Replication Group resource.
-
-## Example Usage
-
-### Redis Master with One Replica
-
-```hcl
-resource "aws_elasticache_replication_group" "bar" {
- replication_group_id = "tf-rep-group-1"
- replication_group_description = "test description"
- node_type = "cache.m1.small"
- number_cache_clusters = 2
- port = 6379
- parameter_group_name = "default.redis3.2"
- availability_zones = ["us-west-2a", "us-west-2b"]
- automatic_failover_enabled = true
-}
-```
-
-### Native Redis Cluster 2 Masters 2 Replicas
-
-```hcl
-resource "aws_elasticache_replication_group" "baz" {
- replication_group_id = "tf-redis-cluster"
- replication_group_description = "test description"
- node_type = "cache.m1.small"
- port = 6379
- parameter_group_name = "default.redis3.2.cluster.on"
- automatic_failover_enabled = true
- cluster_mode {
- replicas_per_node_group = 1
- num_node_groups = 2
- }
-}
-```
-
-~> **Note:** We currently do not support passing a `primary_cluster_id` in order to create the Replication Group.
-
-~> **Note:** Automatic Failover is unavailable for Redis versions earlier than 2.8.6,
-and unavailable on T1 and T2 node types. See the [Amazon Replication with
-Redis](http://docs.aws.amazon.com/en_en/AmazonElastiCache/latest/UserGuide/Replication.html) guide
-for full details on using Replication Groups.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `replication_group_id` – (Required) The replication group identifier. This parameter is stored as a lowercase string.
-* `replication_group_description` – (Required) A user-created description for the replication group.
-* `number_cache_clusters` - (Required) The number of cache clusters this replication group will have.
- If Multi-AZ is enabled, the value of this parameter must be at least 2. Changing this number will force a new resource
-* `node_type` - (Required) The compute and memory capacity of the nodes in the node group.
-* `automatic_failover_enabled` - (Optional) Specifies whether a read-only replica will be automatically promoted to read/write primary if the existing primary fails. Defaults to `false`.
-* `auto_minor_version_upgrade` - (Optional) Specifies whether a minor engine upgrades will be applied automatically to the underlying Cache Cluster instances during the maintenance window. Defaults to `true`.
-* `availability_zones` - (Optional) A list of EC2 availability zones in which the replication group's cache clusters will be created. The order of the availability zones in the list is not important.
-* `engine_version` - (Optional) The version number of the cache engine to be used for the cache clusters in this replication group.
-* `parameter_group_name` - (Optional) The name of the parameter group to associate with this replication group. If this argument is omitted, the default cache parameter group for the specified engine is used.
-* `port` – (Required) The port number on which each of the cache nodes will accept connections. For Memcache the default is 11211, and for Redis the default port is 6379.
-* `subnet_group_name` - (Optional) The name of the cache subnet group to be used for the replication group.
-* `security_group_names` - (Optional) A list of cache security group names to associate with this replication group.
-* `security_group_ids` - (Optional) One or more Amazon VPC security groups associated with this replication group. Use this parameter only when you are creating a replication group in an Amazon Virtual Private Cloud
-* `snapshot_arns` – (Optional) A single-element string list containing an
-Amazon Resource Name (ARN) of a Redis RDB snapshot file stored in Amazon S3.
-Example: `arn:aws:s3:::my_bucket/snapshot1.rdb`
-* `snapshot_name` - (Optional) The name of a snapshot from which to restore data into the new node group. Changing the `snapshot_name` forces a new resource.
-* `maintenance_window` – (Optional) Specifies the weekly time range for when maintenance
-on the cache cluster is performed. The format is `ddd:hh24:mi-ddd:hh24:mi` (24H Clock UTC).
-The minimum maintenance window is a 60 minute period. Example: `sun:05:00-sun:09:00`
-* `notification_topic_arn` – (Optional) An Amazon Resource Name (ARN) of an
-SNS topic to send ElastiCache notifications to. Example:
-`arn:aws:sns:us-east-1:012345678999:my_sns_topic`
-* `snapshot_window` - (Optional, Redis only) The daily time range (in UTC) during which ElastiCache will
-begin taking a daily snapshot of your cache cluster. Example: 05:00-09:00
-* `snapshot_retention_limit` - (Optional, Redis only) The number of days for which ElastiCache will
-retain automatic cache cluster snapshots before deleting them. For example, if you set
-SnapshotRetentionLimit to 5, then a snapshot that was taken today will be retained for 5 days
-before being deleted. If the value of SnapshotRetentionLimit is set to zero (0), backups are turned off.
-Please note that setting a `snapshot_retention_limit` is not supported on cache.t1.micro or cache.t2.* cache nodes
-* `apply_immediately` - (Optional) Specifies whether any modifications are applied immediately, or during the next maintenance window. Default is `false`.
-* `tags` - (Optional) A mapping of tags to assign to the resource
-* `cluster_mode` - (Optional) Create a native redis cluster. `automatic_failover_enabled` must be set to true. Cluster Mode documented below. Only 1 `cluster_mode` block is allowed.
-
-Cluster Mode (`cluster_mode`) supports the following:
-
-* `replicas_per_node_group` - (Required) Specify the number of replica nodes in each node group. Valid values are 0 to 5. Changing this number will force a new resource.
-* `num_node_groups` - (Required) Specify the number of node groups (shards) for this Redis replication group. Changing this number will force a new resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the ElastiCache Replication Group.
-* `primary_endpoint_address` - The address of the endpoint for the primary node in the replication group. If Redis, only present when cluster mode is disabled.
-* `configuration_endpoint_address` - (Redis only) The address of the replication group configuration endpoint when cluster mode is enabled.
-
-## Import
-
-ElastiCache Replication Groups can be imported using the `replication_group_id`, e.g.
-
-```
-$ terraform import aws_elasticache_replication_group.my_replication_group replication-group-1
-```
diff --git a/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown
deleted file mode 100644
index 08b0c2d2d..000000000
--- a/website/source/docs/providers/aws/r/elasticache_security_group.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_security_group"
-sidebar_current: "docs-aws-resource-elasticache-security-group"
-description: |-
- Provides an ElastiCache Security Group to control access to one or more cache clusters.
----
-
-# aws\_elasticache\_security\_group
-
-Provides an ElastiCache Security Group to control access to one or more cache
-clusters.
-
-~> **NOTE:** ElastiCache Security Groups are for use only when working with an
-ElastiCache cluster **outside** of a VPC. If you are using a VPC, see the
-[ElastiCache Subnet Group resource](elasticache_subnet_group.html).
-
-## Example Usage
-
-```hcl
-resource "aws_security_group" "bar" {
- name = "security-group"
-}
-
-resource "aws_elasticache_security_group" "bar" {
- name = "elasticache-security-group"
- security_group_names = ["${aws_security_group.bar.name}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` – (Required) Name for the cache security group. This value is stored as a lowercase string.
-* `description` – (Optional) Description for the cache security group. Defaults to "Managed by Terraform".
-* `security_group_names` – (Required) List of EC2 security group names to be
-authorized for ingress to the cache security group
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `description`
-* `name`
-* `security_group_names`
diff --git a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown b/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown
deleted file mode 100644
index dbcc85980..000000000
--- a/website/source/docs/providers/aws/r/elasticache_subnet_group.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticache_subnet_group"
-sidebar_current: "docs-aws-resource-elasticache-subnet-group"
-description: |-
- Provides an ElastiCache Subnet Group resource.
----
-
-# aws\_elasticache\_subnet\_group
-
-Provides an ElastiCache Subnet Group resource.
-
-~> **NOTE:** ElastiCache Subnet Groups are only for use when working with an
-ElastiCache cluster **inside** of a VPC. If you are on EC2 Classic, see the
-[ElastiCache Security Group resource](elasticache_security_group.html).
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "foo" {
- cidr_block = "10.0.0.0/16"
-
- tags {
- Name = "tf-test"
- }
-}
-
-resource "aws_subnet" "foo" {
- vpc_id = "${aws_vpc.foo.id}"
- cidr_block = "10.0.0.0/24"
- availability_zone = "us-west-2a"
-
- tags {
- Name = "tf-test"
- }
-}
-
-resource "aws_elasticache_subnet_group" "bar" {
- name = "tf-test-cache-subnet"
- subnet_ids = ["${aws_subnet.foo.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` – (Required) Name for the cache subnet group. ElastiCache converts this name to lowercase.
-* `description` – (Optional) Description for the cache subnet group. Defaults to "Managed by Terraform".
-* `subnet_ids` – (Required) List of VPC Subnet IDs for the cache subnet group
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `description`
-* `name`
-* `subnet_ids`
-
-
-## Import
-
-ElastiCache Subnet Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_elasticache_subnet_group.bar tf-test-cache-subnet
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown b/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown
deleted file mode 100644
index 7ee34d9ef..000000000
--- a/website/source/docs/providers/aws/r/elasticsearch_domain.html.markdown
+++ /dev/null
@@ -1,103 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elasticsearch_domain"
-sidebar_current: "docs-aws-resource-elasticsearch-domain"
-description: |-
- Provides an ElasticSearch Domain.
----
-
-# aws\_elasticsearch\_domain
-
-
-## Example Usage
-
-```hcl
-resource "aws_elasticsearch_domain" "es" {
- domain_name = "tf-test"
- elasticsearch_version = "1.5"
- cluster_config {
- instance_type = "r3.large.elasticsearch"
- }
-
- advanced_options {
- "rest.action.multi.allow_explicit_index" = "true"
- }
-
- access_policies = < **NOTE on ELB Instances and ELB Attachments:** Terraform currently
-provides both a standalone [ELB Attachment resource](elb_attachment.html)
-(describing an instance attached to an ELB), and an ELB resource with
-`instances` defined in-line. At this time you cannot use an ELB with in-line
-instances in conjunction with a ELB Attachment resources. Doing so will cause a
-conflict and will overwrite attachments.
-
-## Example Usage
-
-```hcl
-# Create a new load balancer
-resource "aws_elb" "bar" {
- name = "foobar-terraform-elb"
- availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
-
- access_logs {
- bucket = "foo"
- bucket_prefix = "bar"
- interval = 60
- }
-
- listener {
- instance_port = 8000
- instance_protocol = "http"
- lb_port = 80
- lb_protocol = "http"
- }
-
- listener {
- instance_port = 8000
- instance_protocol = "http"
- lb_port = 443
- lb_protocol = "https"
- ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
- }
-
- health_check {
- healthy_threshold = 2
- unhealthy_threshold = 2
- timeout = 3
- target = "HTTP:8000/"
- interval = 30
- }
-
- instances = ["${aws_instance.foo.id}"]
- cross_zone_load_balancing = true
- idle_timeout = 400
- connection_draining = true
- connection_draining_timeout = 400
-
- tags {
- Name = "foobar-terraform-elb"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the ELB. By default generated by Terraform.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified
- prefix. Conflicts with `name`.
-* `access_logs` - (Optional) An Access Logs block. Access Logs documented below.
-* `availability_zones` - (Required for an EC2-classic ELB) The AZ's to serve traffic in.
-* `security_groups` - (Optional) A list of security group IDs to assign to the ELB.
- Only valid if creating an ELB within a VPC
-* `subnets` - (Required for a VPC ELB) A list of subnet IDs to attach to the ELB.
-* `instances` - (Optional) A list of instance ids to place in the ELB pool.
-* `internal` - (Optional) If true, ELB will be an internal ELB.
-* `listener` - (Required) A list of listener blocks. Listeners documented below.
-* `health_check` - (Optional) A health_check block. Health Check documented below.
-* `cross_zone_load_balancing` - (Optional) Enable cross-zone load balancing. Default: `true`
-* `idle_timeout` - (Optional) The time in seconds that the connection is allowed to be idle. Default: `60`
-* `connection_draining` - (Optional) Boolean to enable connection draining. Default: `false`
-* `connection_draining_timeout` - (Optional) The time in seconds to allow for connections to drain. Default: `300`
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Exactly one of `availability_zones` or `subnets` must be specified: this
-determines if the ELB exists in a VPC or in EC2-classic.
-
-Access Logs (`access_logs`) support the following:
-
-* `bucket` - (Required) The S3 bucket name to store the logs in.
-* `bucket_prefix` - (Optional) The S3 bucket prefix. Logs are stored in the root if not configured.
-* `interval` - (Optional) The publishing interval in minutes. Default: 60 minutes.
-* `enabled` - (Optional) Boolean to enable / disable `access_logs`. Default is `true`
-
-Listeners (`listener`) support the following:
-
-* `instance_port` - (Required) The port on the instance to route to
-* `instance_protocol` - (Required) The protocol to use to the instance. Valid
- values are `HTTP`, `HTTPS`, `TCP`, or `SSL`
-* `lb_port` - (Required) The port to listen on for the load balancer
-* `lb_protocol` - (Required) The protocol to listen on. Valid values are `HTTP`,
- `HTTPS`, `TCP`, or `SSL`
-* `ssl_certificate_id` - (Optional) The ARN of an SSL certificate you have
-uploaded to AWS IAM. **Note ECDSA-specific restrictions below. Only valid when `lb_protocol` is either HTTPS or SSL**
-
-Health Check (`health_check`) supports the following:
-
-* `healthy_threshold` - (Required) The number of checks before the instance is declared healthy.
-* `unhealthy_threshold` - (Required) The number of checks before the instance is declared unhealthy.
-* `target` - (Required) The target of the check. Valid pattern is "${PROTOCOL}:${PORT}${PATH}", where PROTOCOL
- values are:
- * `HTTP`, `HTTPS` - PORT and PATH are required
- * `TCP`, `SSL` - PORT is required, PATH is not supported
-* `interval` - (Required) The interval between checks.
-* `timeout` - (Required) The length of time before the check times out.
-
-## Note on ECDSA Key Algorithm
-
-If the ARN of the `ssl_certificate_id` that is pointed to references a
-certificate that was signed by an ECDSA key, note that ELB only supports the
-P256 and P384 curves. Using a certificate signed by a key using a different
-curve could produce the error `ERR_SSL_VERSION_OR_CIPHER_MISMATCH` in your
-browser.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The name of the ELB
-* `name` - The name of the ELB
-* `dns_name` - The DNS name of the ELB
-* `instances` - The list of instances in the ELB
-* `source_security_group` - The name of the security group that you can use as
- part of your inbound rules for your load balancer's back-end application
- instances. Use this for Classic or Default VPC only.
-* `source_security_group_id` - The ID of the security group that you can use as
- part of your inbound rules for your load balancer's back-end application
- instances. Only available on ELBs launched in a VPC.
-* `zone_id` - The canonical hosted zone ID of the ELB (to be used in a Route 53 Alias record)
-
-## Import
-
-ELBs can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_elb.bar elb-production-12345
-```
diff --git a/website/source/docs/providers/aws/r/elb_attachment.html.markdown b/website/source/docs/providers/aws/r/elb_attachment.html.markdown
deleted file mode 100644
index 6ac433b31..000000000
--- a/website/source/docs/providers/aws/r/elb_attachment.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_elb_attachment"
-sidebar_current: "docs-aws-resource-elb-attachment"
-description: |-
- Provides an Elastic Load Balancer Attachment resource.
----
-
-# aws\_elb\_attachment
-
-Provides an Elastic Load Balancer Attachment resource.
-
-~> **NOTE on ELB Instances and ELB Attachments:** Terraform currently provides
-both a standalone ELB Attachment resource (describing an instance attached to
-an ELB), and an [Elastic Load Balancer resource](elb.html) with
-`instances` defined in-line. At this time you cannot use an ELB with in-line
-instances in conjunction with an ELB Attachment resource. Doing so will cause a
-conflict and will overwrite attachments.
-## Example Usage
-
-```hcl
-# Create a new load balancer attachment
-resource "aws_elb_attachment" "baz" {
- elb = "${aws_elb.bar.id}"
- instance = "${aws_instance.foo.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `elb` - (Required) The name of the ELB.
-* `instance` - (Required) Instance ID to place in the ELB pool.
diff --git a/website/source/docs/providers/aws/r/emr_cluster.html.md b/website/source/docs/providers/aws/r/emr_cluster.html.md
deleted file mode 100644
index dd2beb0ff..000000000
--- a/website/source/docs/providers/aws/r/emr_cluster.html.md
+++ /dev/null
@@ -1,404 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_emr_cluster"
-sidebar_current: "docs-aws-resource-emr-cluster"
-description: |-
- Provides an Elastic MapReduce Cluster
----
-
-# aws\_emr\_cluster
-
-Provides an Elastic MapReduce Cluster, a web service that makes it easy to
-process large amounts of data efficiently. See [Amazon Elastic MapReduce Documentation](https://aws.amazon.com/documentation/elastic-mapreduce/)
-for more information.
-
-## Example Usage
-
-```hcl
-resource "aws_emr_cluster" "emr-test-cluster" {
- name = "emr-test-arn"
- release_label = "emr-4.6.0"
- applications = ["Spark"]
-
- termination_protection = false
- keep_job_flow_alive_when_no_steps = true
-
- ec2_attributes {
- subnet_id = "${aws_subnet.main.id}"
- emr_managed_master_security_group = "${aws_security_group.sg.id}"
- emr_managed_slave_security_group = "${aws_security_group.sg.id}"
- instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
- }
-
- master_instance_type = "m3.xlarge"
- core_instance_type = "m3.xlarge"
- core_instance_count = 1
-
- tags {
- role = "rolename"
- env = "env"
- }
-
- bootstrap_action {
- path = "s3://elasticmapreduce/bootstrap-actions/run-if"
- name = "runif"
- args = ["instance.isMaster=true", "echo running on master node"]
- }
-
- configurations = "test-fixtures/emr_configurations.json"
-
- service_role = "${aws_iam_role.iam_emr_service_role.arn}"
-}
-```
-
-The `aws_emr_cluster` resource typically requires two IAM roles, one for the EMR Cluster
-to use as a service, and another to place on your Cluster Instances to interact
-with AWS from those instances. The suggested role policy template for the EMR service is `AmazonElasticMapReduceRole`,
-and `AmazonElasticMapReduceforEC2Role` for the EC2 profile. See the [Getting
-Started](https://docs.aws.amazon.com/ElasticMapReduce/latest/ManagementGuide/emr-gs-launch-sample-cluster.html)
-guide for more information on these IAM roles. There is also a fully-bootable
-example Terraform configuration at the bottom of this page.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the job flow
-* `release_label` - (Required) The release label for the Amazon EMR release
-* `master_instance_type` - (Required) The EC2 instance type of the master node
-* `service_role` - (Required) IAM role that will be assumed by the Amazon EMR service to access AWS resources
-* `security_configuration` - (Optional) The security configuration name to attach to the EMR cluster. Only valid for EMR clusters with `release_label` 4.8.0 or greater
-* `core_instance_type` - (Optional) The EC2 instance type of the slave nodes
-* `core_instance_count` - (Optional) Number of Amazon EC2 instances used to execute the job flow. EMR will use one node as the cluster's master node and use the remainder of the nodes (`core_instance_count`-1) as core nodes. Default `1`
-* `log_uri` - (Optional) S3 bucket to write the log files of the job flow. If a value
- is not provided, logs are not created
-* `applications` - (Optional) A list of applications for the cluster. Valid values are: `Flink`, `Hadoop`, `Hive`, `Mahout`, `Pig`, and `Spark`. Case insensitive
-* `termination_protection` - (Optional) Switch on/off termination protection (default is off)
-* `keep_job_flow_alive_when_no_steps` - (Optional) Switch on/off run cluster with no steps or when all steps are complete (default is on)
-* `ec2_attributes` - (Optional) Attributes for the EC2 instances running the job
-flow. Defined below
-* `bootstrap_action` - (Optional) List of bootstrap actions that will be run before Hadoop is started on
- the cluster nodes. Defined below
-* `configurations` - (Optional) List of configurations supplied for the EMR cluster you are creating
-* `visible_to_all_users` - (Optional) Whether the job flow is visible to all IAM users of the AWS account associated with the job flow. Default `true`
-* `autoscaling_role` - (Optional) An IAM role for automatic scaling policies. The IAM role provides permissions that the automatic scaling feature requires to launch and terminate EC2 instances in an instance group.
-* `tags` - (Optional) list of tags to apply to the EMR Cluster
-
-
-## ec2\_attributes
-
-Attributes for the Amazon EC2 instances running the job flow
-
-* `key_name` - (Optional) Amazon EC2 key pair that can be used to ssh to the master
- node as the user called `hadoop`
-* `subnet_id` - (Optional) VPC subnet id where you want the job flow to launch.
-Cannot specify the `cc1.4xlarge` instance type for nodes of a job flow launched in a Amazon VPC
-* `additional_master_security_groups` - (Optional) List of additional Amazon EC2 security group IDs for the master node
-* `additional_slave_security_groups` - (Optional) List of additional Amazon EC2 security group IDs for the slave nodes
-* `emr_managed_master_security_group` - (Optional) Identifier of the Amazon EC2 security group for the master node
-* `emr_managed_slave_security_group` - (Optional) Identifier of the Amazon EC2 security group for the slave nodes
-* `service_access_security_group` - (Optional) Identifier of the Amazon EC2 service-access security group - required when the cluster runs on a private subnet
-* `instance_profile` - (Required) Instance Profile for EC2 instances of the cluster assume this role
-
-
-## bootstrap\_action
-
-* `name` - (Required) Name of the bootstrap action
-* `path` - (Required) Location of the script to run during a bootstrap action. Can be either a location in Amazon S3 or on a local file system
-* `args` - (Optional) List of command line arguments to pass to the bootstrap action script
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the EMR Cluster
-* `name` - The name of the cluster.
-* `release_label` - The release label for the Amazon EMR release.
-* `master_instance_type` - The EC2 instance type of the master node.
-* `master_public_dns` - The public DNS name of the master EC2 instance.
-* `core_instance_type` - The EC2 instance type of the slave nodes.
-* `core_instance_count` The number of slave nodes, i.e. EC2 instance nodes.
-* `log_uri` - The path to the Amazon S3 location where logs for this cluster are stored.
-* `applications` - The applications installed on this cluster.
-* `ec2_attributes` - Provides information about the EC2 instances in a cluster grouped by category: key name, subnet ID, IAM instance profile, and so on.
-* `bootstrap_action` - A list of bootstrap actions that will be run before Hadoop is started on the cluster nodes.
-* `configurations` - The list of Configurations supplied to the EMR cluster.
-* `service_role` - The IAM role that will be assumed by the Amazon EMR service to access AWS resources on your behalf.
-* `visible_to_all_users` - Indicates whether the job flow is visible to all IAM users of the AWS account associated with the job flow.
-* `tags` - The list of tags associated with a cluster.
-
-
-## Example bootable config
-
-**NOTE:** This configuration demonstrates a minimal configuration needed to
-boot an example EMR Cluster. It is not meant to display best practices. Please
-use at your own risk.
-
-
-```
-provider "aws" {
- region = "us-west-2"
-}
-
-resource "aws_emr_cluster" "tf-test-cluster" {
- name = "emr-test-arn"
- release_label = "emr-4.6.0"
- applications = ["Spark"]
-
- ec2_attributes {
- subnet_id = "${aws_subnet.main.id}"
- emr_managed_master_security_group = "${aws_security_group.allow_all.id}"
- emr_managed_slave_security_group = "${aws_security_group.allow_all.id}"
- instance_profile = "${aws_iam_instance_profile.emr_profile.arn}"
- }
-
- master_instance_type = "m3.xlarge"
- core_instance_type = "m3.xlarge"
- core_instance_count = 1
-
- tags {
- role = "rolename"
- dns_zone = "env_zone"
- env = "env"
- name = "name-env"
- }
-
- bootstrap_action {
- path = "s3://elasticmapreduce/bootstrap-actions/run-if"
- name = "runif"
- args = ["instance.isMaster=true", "echo running on master node"]
- }
-
- configurations = "test-fixtures/emr_configurations.json"
-
- service_role = "${aws_iam_role.iam_emr_service_role.arn}"
-}
-
-resource "aws_security_group" "allow_all" {
- name = "allow_all"
- description = "Allow all inbound traffic"
- vpc_id = "${aws_vpc.main.id}"
-
- ingress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- depends_on = ["aws_subnet.main"]
-
- lifecycle {
- ignore_changes = ["ingress", "egress"]
- }
-
- tags {
- name = "emr_test"
- }
-}
-
-resource "aws_vpc" "main" {
- cidr_block = "168.31.0.0/16"
- enable_dns_hostnames = true
-
- tags {
- name = "emr_test"
- }
-}
-
-resource "aws_subnet" "main" {
- vpc_id = "${aws_vpc.main.id}"
- cidr_block = "168.31.0.0/20"
-
- tags {
- name = "emr_test"
- }
-}
-
-resource "aws_internet_gateway" "gw" {
- vpc_id = "${aws_vpc.main.id}"
-}
-
-resource "aws_route_table" "r" {
- vpc_id = "${aws_vpc.main.id}"
-
- route {
- cidr_block = "0.0.0.0/0"
- gateway_id = "${aws_internet_gateway.gw.id}"
- }
-}
-
-resource "aws_main_route_table_association" "a" {
- vpc_id = "${aws_vpc.main.id}"
- route_table_id = "${aws_route_table.r.id}"
-}
-
-###
-
-# IAM Role setups
-
-###
-
-# IAM role for EMR Service
-resource "aws_iam_role" "iam_emr_service_role" {
- name = "iam_emr_service_role"
-
- assume_role_policy = < **NOTE:** At this time, Instance Groups cannot be destroyed through the API nor
-web interface. Instance Groups are destroyed when the EMR Cluster is destroyed.
-Terraform will resize any Instance Group to zero when destroying the resource.
-
-## Example Usage
-
-```hcl
-resource "aws_emr_instance_group" "task" {
- cluster_id = "${aws_emr_cluster.tf-test-cluster.id}"
- instance_count = 1
- instance_type = "m3.xlarge"
- name = "my little instance group"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-* `name` (Required) Human friendly name given to the instance group. Changing this forces a new resource to be created.
-* `cluster_id` (Required) ID of the EMR Cluster to attach to. Changing this forces a new resource to be created.
-* `instance_type` (Required) The EC2 instance type for all instances in the instance group. Changing this forces a new resource to be created.
-* `instance_count` (Optional) Target number of instances for the instance group. Defaults to 0.
-* `ebs_optimized` (Optional) Indicates whether an Amazon EBS volume is EBS-optimized. Changing this forces a new resource to be created.
-* `ebs_config` (Optional) One or more `ebs_config` blocks as defined below. Changing this forces a new resource to be created.
-
-`ebs_config` supports the following:
-* `iops` - (Optional) The number of I/O operations per second (IOPS) that the volume supports.
-* `size` - (Optional) The volume size, in gibibytes (GiB). This can be a number from 1 - 1024. If the volume type is EBS-optimized, the minimum value is 10.
-* `type` - (Optional) The volume type. Valid options are 'gp2', 'io1' and 'standard'.
-* `volumes_per_instance` - (Optional) The number of EBS Volumes to attach per instance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EMR Instance ID
-
-* `running_instance_count` The number of instances currently running in this instance group.
-
-* `status` The current status of the instance group.
diff --git a/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown b/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown
deleted file mode 100644
index 54717817f..000000000
--- a/website/source/docs/providers/aws/r/emr_security_configuration.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_emr_security_configuration"
-sidebar_current: "docs-aws-resource-emr-security-configuration"
-description: |-
- Provides a resource to manage AWS EMR Security Configurations
----
-
-# aws\_emr\_security\_configuration
-
-Provides a resource to manage AWS EMR Security Configurations
-
-## Example Usage
-
-```hcl
-resource "aws_emr_security_configuration" "foo" {
- name = "emrsc_other"
-
- configuration = < **NOTE:** When removing a Glacier Vault, the Vault must be empty.
-
-## Example Usage
-
-```hcl
-resource "aws_sns_topic" "aws_sns_topic" {
- name = "glacier-sns-topic"
-}
-
-resource "aws_glacier_vault" "my_archive" {
- name = "MyArchive"
-
- notification {
- sns_topic = "${aws_sns_topic.aws_sns_topic.arn}"
- events = ["ArchiveRetrievalCompleted", "InventoryRetrievalCompleted"]
- }
-
- access_policy = < **NOTE:** The encrypted secret may be decrypted using the command line,
- for example: `terraform output secret | base64 --decode | keybase pgp decrypt`.
-* `ses_smtp_password` - The secret access key converted into an SES SMTP
- password by applying [AWS's documented conversion
- algorithm](https://docs.aws.amazon.com/ses/latest/DeveloperGuide/smtp-credentials.html#smtp-credentials-convert).
-* `status` - "Active" or "Inactive". Keys are initially active, but can be made
- inactive by other means.
diff --git a/website/source/docs/providers/aws/r/iam_account_alias.html.markdown b/website/source/docs/providers/aws/r/iam_account_alias.html.markdown
deleted file mode 100644
index 67365a541..000000000
--- a/website/source/docs/providers/aws/r/iam_account_alias.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_account_alias"
-sidebar_current: "docs-aws-resource-iam-account-alias"
-description: |-
- Manages the account alias for the AWS Account.
----
-
-# aws\_iam\_account\_alias
-
--> **Note:** There is only a single account alias per AWS account.
-
-Manages the account alias for the AWS Account.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_account_alias" "alias" {
- account_alias = "my-account-alias"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `account_alias` - (Required) The account alias
-
-## Import
-
-The current Account Alias can be imported using the `account_alias`, e.g.
-
-```
-$ terraform import aws_iam_account_alias.alias my-account-alias
-```
diff --git a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown b/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown
deleted file mode 100644
index 3cb4631dd..000000000
--- a/website/source/docs/providers/aws/r/iam_account_password_policy.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_account_password_policy"
-sidebar_current: "docs-aws-resource-iam-account-password-policy"
-description: |-
- Manages Password Policy for the AWS Account.
----
-
-# aws\_iam\_account_password_policy
-
--> **Note:** There is only a single policy allowed per AWS account. An existing policy will be lost when using this resource as an effect of this limitation.
-
-Manages Password Policy for the AWS Account.
-See more about [Account Password Policy](http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_passwords_account-policy.html)
-in the official AWS docs.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_account_password_policy" "strict" {
- minimum_password_length = 8
- require_lowercase_characters = true
- require_numbers = true
- require_uppercase_characters = true
- require_symbols = true
- allow_users_to_change_password = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allow_users_to_change_password` - (Optional) Whether to allow users to change their own password
-* `hard_expiry` - (Optional) Whether users are prevented from setting a new password after their password has expired
- (i.e. require administrator reset)
-* `max_password_age` - (Optional) The number of days that a user password is valid.
-* `minimum_password_length` - (Optional) Minimum length to require for user passwords.
-* `password_reuse_prevention` - (Optional) The number of previous passwords that users are prevented from reusing.
-* `require_lowercase_characters` - (Optional) Whether to require lowercase characters for user passwords.
-* `require_numbers` - (Optional) Whether to require numbers for user passwords.
-* `require_symbols` - (Optional) Whether to require symbols for user passwords.
-* `require_uppercase_characters` - (Optional) Whether to require uppercase characters for user passwords.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `expire_passwords` - Indicates whether passwords in the account expire.
- Returns `true` if `max_password_age` contains a value greater than `0`.
- Returns `false` if it is `0` or _not present_.
-
-
-## Import
-
-IAM Account Password Policy can be imported using the word `iam-account-password-policy`, e.g.
-
-```
-$ terraform import aws_iam_account_password_policy.strict iam-account-password-policy
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/iam_group.html.markdown b/website/source/docs/providers/aws/r/iam_group.html.markdown
deleted file mode 100644
index da1486d24..000000000
--- a/website/source/docs/providers/aws/r/iam_group.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_group"
-sidebar_current: "docs-aws-resource-iam-group"
-description: |-
- Provides an IAM group.
----
-
-# aws\_iam\_group
-
-Provides an IAM group.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_group" "developers" {
- name = "developers"
- path = "/users/"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The group's name. The name must consist of upper and lowercase alphanumeric characters with no spaces. You can also include any of the following characters: `=,.@-_.`. Group names are not distinguished by case. For example, you cannot create groups named both "ADMINS" and "admins".
-* `path` - (Optional, default "/") Path in which to create the group.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The group's ID.
-* `arn` - The ARN assigned by AWS for this group.
-* `name` - The group's name.
-* `path` - The path of the group in IAM.
-* `unique_id` - The [unique ID][1] assigned by AWS.
-
- [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#GUIDs
-
-## Import
-
-IAM Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_iam_group.developers developers
-```
diff --git a/website/source/docs/providers/aws/r/iam_group_membership.html.markdown b/website/source/docs/providers/aws/r/iam_group_membership.html.markdown
deleted file mode 100644
index eb929b82e..000000000
--- a/website/source/docs/providers/aws/r/iam_group_membership.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_group_membership"
-sidebar_current: "docs-aws-resource-iam-group-membership"
-description: |-
- Provides a top level resource to manage IAM Group membership for IAM Users.
----
-
-# aws\_iam\_group\_membership
-
-Provides a top level resource to manage IAM Group membership for IAM Users. For
-more information on managing IAM Groups or IAM Users, see [IAM Groups][1] or
-[IAM Users][2]
-
-## Example Usage
-
-```hcl
-resource "aws_iam_group_membership" "team" {
- name = "tf-testing-group-membership"
-
- users = [
- "${aws_iam_user.user_one.name}",
- "${aws_iam_user.user_two.name}",
- ]
-
- group = "${aws_iam_group.group.name}"
-}
-
-resource "aws_iam_group" "group" {
- name = "test-group"
-}
-
-resource "aws_iam_user" "user_one" {
- name = "test-user"
-}
-
-resource "aws_iam_user" "user_two" {
- name = "test-user-two"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name to identify the Group Membership
-* `users` - (Required) A list of IAM User names to associate with the Group
-* `group` – (Required) The IAM Group name to attach the list of `users` to
-
-## Attributes Reference
-
-* `name` - The name identifying the Group Membership
-* `users` - list of IAM User names
-* `group` – IAM Group name
-
-
-[1]: /docs/providers/aws/r/iam_group.html
-[2]: /docs/providers/aws/r/iam_user.html
diff --git a/website/source/docs/providers/aws/r/iam_group_policy.html.markdown b/website/source/docs/providers/aws/r/iam_group_policy.html.markdown
deleted file mode 100644
index 4bf62e931..000000000
--- a/website/source/docs/providers/aws/r/iam_group_policy.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_group_policy"
-sidebar_current: "docs-aws-resource-iam-group-policy"
-description: |-
- Provides an IAM policy attached to a group.
----
-
-# aws\_iam\_group\_policy
-
-Provides an IAM policy attached to a group.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_group_policy" "my_developer_policy" {
- name = "my_developer_policy"
- group = "${aws_iam_group.my_developers.id}"
-
- policy = < **NOTE:** Either `role` or `roles` (**deprecated**) must be specified.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_instance_profile" "test_profile" {
- name = "test_profile"
- role = "${aws_iam_role.role.name}"
-}
-
-resource "aws_iam_role" "role" {
- name = "test_role"
- path = "/"
-
- assume_role_policy = <= 2 roles are not possible. See [issue #11575](https://github.com/hashicorp/terraform/issues/11575).
-* `role` - (Optional) The role name to include in the profile.
-
-## Attribute Reference
-
-* `id` - The instance profile's ID.
-* `arn` - The ARN assigned by AWS to the instance profile.
-* `create_date` - The creation timestamp of the instance profile.
-* `name` - The instance profile's name.
-* `path` - The path of the instance profile in IAM.
-* `role` - The role assigned to the instance profile.
-* `roles` - The list of roles assigned to the instance profile. (**Deprecated**)
-* `unique_id` - The [unique ID][1] assigned by AWS.
-
- [1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html#GUIDs
-
-
-## Import
-
-Instance Profiles can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_iam_instance_profile.test_profile app-instance-profile-1
-```
diff --git a/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown b/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown
deleted file mode 100644
index bdd49c4ef..000000000
--- a/website/source/docs/providers/aws/r/iam_openid_connect_provider.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_openid_connect_provider"
-sidebar_current: "docs-aws-resource-iam-openid-connect-provider"
-description: |-
- Provides an IAM OpenID Connect provider.
----
-
-# aws\_iam\_openid\_connect\_provider
-
-Provides an IAM OpenID Connect provider.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_openid_connect_provider" "default" {
- url = "https://accounts.google.com"
- client_id_list = [
- "266362248691-342342xasdasdasda-apps.googleusercontent.com"
- ]
- thumbprint_list = []
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `url` - (Required) The URL of the identity provider. Corresponds to the _iss_ claim.
-* `client_id_list` - (Required) A list of client IDs (also known as audiences). When a mobile or web app registers with an OpenID Connect provider, they establish a value that identifies the application. (This is the value that's sent as the client_id parameter on OAuth requests.)
-* `thumbprint_list` - (Required) A list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The ARN assigned by AWS for this provider.
-
-## Import
-
-IAM OpenID Connect Providers can be imported using the `arn`, e.g.
-
-```
-$ terraform import aws_iam_openid_connect_provider.default arn:aws:iam::123456789012:oidc-provider/accounts.google.com
-```
diff --git a/website/source/docs/providers/aws/r/iam_policy.html.markdown b/website/source/docs/providers/aws/r/iam_policy.html.markdown
deleted file mode 100644
index e82c4b6f4..000000000
--- a/website/source/docs/providers/aws/r/iam_policy.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_policy"
-sidebar_current: "docs-aws-resource-iam-policy"
-description: |-
- Provides an IAM policy.
----
-
-# aws\_iam\_policy
-
-Provides an IAM policy.
-
-```hcl
-resource "aws_iam_policy" "policy" {
- name = "test_policy"
- path = "/"
- description = "My test policy"
-
- policy = < **NOTE:** The aws_iam_policy_attachment resource is only meant to be used once for each managed policy. All of the users/roles/groups that a single policy is being attached to should be declared by a single aws_iam_policy_attachment resource.
-
-```hcl
-resource "aws_iam_user" "user" {
- name = "test-user"
-}
-
-resource "aws_iam_role" "role" {
- name = "test-role"
-}
-
-resource "aws_iam_group" "group" {
- name = "test-group"
-}
-
-resource "aws_iam_policy" "policy" {
- name = "test-policy"
- description = "A test policy"
- policy = # omitted
-}
-
-resource "aws_iam_policy_attachment" "test-attach" {
- name = "test-attachment"
- users = ["${aws_iam_user.user.name}"]
- roles = ["${aws_iam_role.role.name}"]
- groups = ["${aws_iam_group.group.name}"]
- policy_arn = "${aws_iam_policy.policy.arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` (Required) - The name of the policy. This cannot be an empty string.
-* `users` (Optional) - The user(s) the policy should be applied to
-* `roles` (Optional) - The role(s) the policy should be applied to
-* `groups` (Optional) - The group(s) the policy should be applied to
-* `policy_arn` (Required) - The ARN of the policy you want to apply
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The policy's ID.
-* `name` - The name of the policy.
diff --git a/website/source/docs/providers/aws/r/iam_role.html.markdown b/website/source/docs/providers/aws/r/iam_role.html.markdown
deleted file mode 100644
index ad8b983d3..000000000
--- a/website/source/docs/providers/aws/r/iam_role.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_role"
-sidebar_current: "docs-aws-resource-iam-role"
-description: |-
- Provides an IAM role.
----
-
-# aws\_iam\_role
-
-Provides an IAM role.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_role" "test_role" {
- name = "test_role"
-
- assume_role_policy = < **NOTE:** This `assume_role_policy` is very similar but slightly different than just a standard IAM policy and cannot use an `aws_iam_policy` resource. It _can_ however, use an `aws_iam_policy_document` [data source](https://www.terraform.io/docs/providers/aws/d/iam_policy_document.html), see example below for how this could work.
-
-* `path` - (Optional) The path to the role.
- See [IAM Identifiers](https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html) for more information.
-* `description` - (Optional) The description of the role.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) specifying the role.
-* `create_date` - The creation date of the IAM role.
-* `unique_id` - The stable and unique string identifying the role.
-* `name` - The name of the role.
-* `description` - The description of the role.
-
-## Example of Using Data Source for Assume Role Policy
-
-```hcl
-data "aws_iam_policy_document" "instance-assume-role-policy" {
- statement {
- actions = ["sts:AssumeRole"]
-
- principals {
- type = "Service"
- identifiers = ["ec2.amazonaws.com"]
- }
- }
-}
-
-resource "aws_iam_role" "instance" {
- name = "instance_role"
- path = "/system/"
- assume_role_policy = "${data.aws_iam_policy_document.instance-assume-role-policy.json}"
-}
-```
-
-## Import
-
-IAM Roles can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_iam_role.developer developer_name
-```
diff --git a/website/source/docs/providers/aws/r/iam_role_policy.html.markdown b/website/source/docs/providers/aws/r/iam_role_policy.html.markdown
deleted file mode 100644
index 540ee0105..000000000
--- a/website/source/docs/providers/aws/r/iam_role_policy.html.markdown
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_role_policy"
-sidebar_current: "docs-aws-resource-iam-role-policy"
-description: |-
- Provides an IAM role policy.
----
-
-# aws\_iam\_role\_policy
-
-Provides an IAM role policy.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_role_policy" "test_policy" {
- name = "test_policy"
- role = "${aws_iam_role.test_role.id}"
-
- policy = < **Note:** All arguments including the private key will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-**Using certs on file:**
-
-```hcl
-resource "aws_iam_server_certificate" "test_cert" {
- name = "some_test_cert"
- certificate_body = "${file("self-ca-cert.pem")}"
- private_key = "${file("test-key.pem")}"
-}
-```
-
-**Example with cert in-line:**
-
-```hcl
-resource "aws_iam_server_certificate" "test_cert_alt" {
- name = "alt_test_cert"
-
- certificate_body = < **NOTE:** AWS performs behind-the-scenes modifications to some certificate files if they do not adhere to a specific format. These modifications will result in terraform forever believing that it needs to update the resources since the local and AWS file contents will not match after these modifications occur. In order to prevent this from happening you must ensure that all your PEM-encoded files use UNIX line-breaks and that `certificate_body` contains only one certificate. All other certificates should go in `certificate_chain`. It is common for some Certificate Authorities to issue certificate files that have DOS line-breaks and that are actually multiple certificates concatenated together in order to form a full certificate chain.
-
-## Attributes Reference
-
-* `id` - The unique Server Certificate name
-* `name` - The name of the Server Certificate
-* `arn` - The Amazon Resource Name (ARN) specifying the server certificate.
-
-
-[1]: https://docs.aws.amazon.com/IAM/latest/UserGuide/Using_Identifiers.html
-[2]: https://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html
-[lifecycle]: /docs/configuration/resources.html
diff --git a/website/source/docs/providers/aws/r/iam_user.html.markdown b/website/source/docs/providers/aws/r/iam_user.html.markdown
deleted file mode 100644
index 2966c4b5f..000000000
--- a/website/source/docs/providers/aws/r/iam_user.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_user"
-sidebar_current: "docs-aws-resource-iam-user"
-description: |-
- Provides an IAM user.
----
-
-# aws\_iam\_user
-
-Provides an IAM user.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_user" "lb" {
- name = "loadbalancer"
- path = "/system/"
-}
-
-resource "aws_iam_access_key" "lb" {
- user = "${aws_iam_user.lb.name}"
-}
-
-resource "aws_iam_user_policy" "lb_ro" {
- name = "test"
- user = "${aws_iam_user.lb.name}"
-
- policy = < **NOTE:** The encrypted password may be decrypted using the command line,
- for example: `terraform output password | base64 --decode | keybase pgp decrypt`.
-
-## Import
-
-IAM Login Profiles may not be imported.
diff --git a/website/source/docs/providers/aws/r/iam_user_policy.html.markdown b/website/source/docs/providers/aws/r/iam_user_policy.html.markdown
deleted file mode 100644
index 4d32daeae..000000000
--- a/website/source/docs/providers/aws/r/iam_user_policy.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_iam_user_policy"
-sidebar_current: "docs-aws-resource-iam-user-policy"
-description: |-
- Provides an IAM policy attached to a user.
----
-
-# aws\_iam\_user\_policy
-
-Provides an IAM policy attached to a user.
-
-## Example Usage
-
-```hcl
-resource "aws_iam_user_policy" "lb_ro" {
- name = "test"
- user = "${aws_iam_user.lb.name}"
-
- policy = < **NOTE on EBS block devices:** If you use `ebs_block_device` on an `aws_instance`, Terraform will assume management over the full set of non-root EBS block devices for the instance, and treats additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `aws_ebs_volume` + `aws_volume_attachment` resources for a given instance.
-
-Each `ephemeral_block_device` supports the following:
-
-* `device_name` - The name of the block device to mount on the instance.
-* `virtual_name` - (Optional) The [Instance Store Device
- Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames)
- (e.g. `"ephemeral0"`).
-* `no_device` - (Optional) Suppresses the specified device included in the AMI's block device mapping.
-
-Each AWS Instance type has a different set of Instance Store block devices
-available for attachment. AWS [publishes a
-list](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#StorageOnInstanceTypes)
-of which ephemeral devices are available on each type. The devices are always
-identified by the `virtual_name` in the format `"ephemeral{0..N}"`.
-
-~> **NOTE:** Currently, changes to `*_block_device` configuration of _existing_
-resources cannot be automatically detected by Terraform. After making updates
-to block device configuration, resource recreation can be manually triggered by
-using the [`taint` command](/docs/commands/taint.html).
-
-### Network Interfaces
-
-Each of the `network_interface` blocks attach a network interface to an EC2 Instance during boot time. However, because
-the network interface is attached at boot-time, replacing/modifying the network interface **WILL** trigger a recreation
-of the EC2 Instance. If you should need at any point to detach/modify/re-attach a network interface to the instance, use
-the `aws_network_interface` or `aws_network_interface_attachment` resources instead.
-
-The `network_interface` configuration block _does_, however, allow users to supply their own network interface to be used
-as the default network interface on an EC2 Instance, attached at `eth0`.
-
-Each `network_interface` block supports the following:
-
-* `device_index` - (Required) The integer index of the network interface attachment. Limited by instance type.
-* `network_interface_id` - (Required) The ID of the network interface to attach.
-* `delete_on_termination` - (Optional) Whether or not to delete the network interface on instance termination. Defaults to `false`.
-
-### Example
-
-```hcl
-resource "aws_vpc" "my_vpc" {
- cidr_block = "172.16.0.0/16"
- tags {
- Name = "tf-example"
- }
-}
-
-resource "aws_subnet" "my_subnet" {
- vpc_id = "${aws_vpc.my_vpc.id}"
- cidr_block = "172.16.10.0/24"
- availability_zone = "us-west-2a"
- tags {
- Name = "tf-example"
- }
-}
-
-resource "aws_network_interface" "foo" {
- subnet_id = "${aws_subnet.my_subnet.id}"
- private_ips = ["172.16.10.100"]
- tags {
- Name = "primary_network_interface"
- }
-}
-
-resource "aws_instance" "foo" {
- ami = "ami-22b9a343" # us-west-2
- instance_type = "t2.micro"
- network_interface {
- network_interface_id = "${aws_network_interface.foo.id}"
- device_index = 0
- }
-}
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The instance ID.
-* `availability_zone` - The availability zone of the instance.
-* `placement_group` - The placement group of the instance.
-* `key_name` - The key name of the instance
-* `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this
- is only available if you've enabled DNS hostnames for your VPC
-* `public_ip` - The public IP address assigned to the instance, if applicable. **NOTE**: If you are using an [`aws_eip`](/docs/providers/aws/r/eip.html) with your instance, you should refer to the EIP's address directly and not use `public_ip`, as this field will change after the EIP is attached.
-* `network_interface_id` - The ID of the network interface that was created with the instance.
-* `primary_network_interface_id` - The ID of the instance's primary network interface.
-* `private_dns` - The private DNS name assigned to the instance. Can only be
- used inside the Amazon EC2, and only available if you've enabled DNS hostnames
- for your VPC
-* `private_ip` - The private IP address assigned to the instance
-* `security_groups` - The associated security groups.
-* `vpc_security_group_ids` - The associated security groups in non-default VPC
-* `subnet_id` - The VPC subnet ID.
-
-
-## Import
-
-Instances can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_instance.web i-12345678
-```
diff --git a/website/source/docs/providers/aws/r/internet_gateway.html.markdown b/website/source/docs/providers/aws/r/internet_gateway.html.markdown
deleted file mode 100644
index 339a894e6..000000000
--- a/website/source/docs/providers/aws/r/internet_gateway.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_internet_gateway"
-sidebar_current: "docs-aws-resource-internet-gateway"
-description: |-
- Provides a resource to create a VPC Internet Gateway.
----
-
-# aws\_internet\_gateway
-
-Provides a resource to create a VPC Internet Gateway.
-
-## Example Usage
-
-```hcl
-resource "aws_internet_gateway" "gw" {
- vpc_id = "${aws_vpc.main.id}"
-
- tags {
- Name = "main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The VPC ID to create in.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
--> **Note:** It's recommended to denote that the AWS Instance or Elastic IP depends on the Internet Gateway. For example:
-
-
- resource "aws_internet_gateway" "gw" {
- vpc_id = "${aws_vpc.main.id}"
- }
-
- resource "aws_instance" "foo" {
- depends_on = ["aws_internet_gateway.gw"]
- }
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Internet Gateway.
-
-
-## Import
-
-Internet Gateways can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_internet_gateway.gw igw-c0a643a9
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/key_pair.html.markdown b/website/source/docs/providers/aws/r/key_pair.html.markdown
deleted file mode 100644
index 89ad0abfb..000000000
--- a/website/source/docs/providers/aws/r/key_pair.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_key_pair"
-sidebar_current: "docs-aws-resource-key-pair"
-description: |-
- Provides a Key Pair resource. Currently this supports importing an existing key pair but not creating a new key pair.
----
-
-# aws\_key\_pair
-
-Provides an [EC2 key pair](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html) resource. A key pair is used to control login access to EC2 instances.
-
-Currently this resource requires an existing user-supplied key pair. This key pair's public key will be registered with AWS to allow logging-in to EC2 instances.
-
-When importing an existing key pair the public key material may be in any format supported by AWS. Supported formats (per the [AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html#how-to-generate-your-own-key-and-import-it-to-aws)) are:
-
-* OpenSSH public key format (the format in ~/.ssh/authorized_keys)
-* Base64 encoded DER format
-* SSH public key file format as specified in RFC4716
-
-## Example Usage
-
-```hcl
-resource "aws_key_pair" "deployer" {
- key_name = "deployer-key"
- public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD3F6tyPEFEzV0LX3X8BsXdMsQz1x2cEikKDEY0aIj41qgxMCP/iteneqXSIFZBp5vizPvaoIR3Um9xK7PGoW8giupGn+EPuxIA4cDM4vzOqOkiMPhz5XK0whEjkVzTo4+S0puvDZuwIsdiW9mxhJc7tgBNL0cYlWSYVkz4G/fslNfRPW5mYAM49f4fhtxPb5ok4Q2Lg9dPKVHO/Bgeu5woMc7RY0p1ej6D4CKFE6lymSDJpW0YHX/wqE9+cfEauh7xZcG0q9t2ta6F6fmX0agvpFyZo8aFbXeUBr7osSCJNgvavWbM/06niWrOvYX2xwWdhXmXSrbX8ZbabVohBK41 email@example.com"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `key_name` - (Optional) The name for the key pair.
-* `key_name_prefix` - (Optional) Creates a unique name beginning with the specified prefix. Conflicts with `key_name`.
-* `public_key` - (Required) The public key material.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `key_name` - The key pair name.
-* `fingerprint` - The MD5 public key fingerprint as specified in section 4 of RFC 4716.
-
-## Import
-
-Key Pairs can be imported using the `key_name`, e.g.
-
-```
-$ terraform import aws_key_pair.deployer deployer-key
-```
diff --git a/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown b/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown
deleted file mode 100644
index c77386d2e..000000000
--- a/website/source/docs/providers/aws/r/kinesis_firehose_delivery_stream.html.markdown
+++ /dev/null
@@ -1,183 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kinesis_firehose_delivery_stream"
-sidebar_current: "docs-aws-resource-kinesis-firehose-delivery-stream"
-description: |-
- Provides an AWS Kinesis Firehose Delivery Stream
----
-
-# aws\_kinesis\_firehose\_delivery\_stream
-
-Provides a Kinesis Firehose Delivery Stream resource. Amazon Kinesis Firehose is a fully managed, elastic service to easily deliver real-time data streams to destinations such as Amazon S3 and Amazon Redshift.
-
-For more details, see the [Amazon Kinesis Firehose Documentation][1].
-
-## Example Usage
-
-### S3 Destination
-
-```hcl
-resource "aws_s3_bucket" "bucket" {
- bucket = "tf-test-bucket"
- acl = "private"
-}
-
-resource "aws_iam_role" "firehose_role" {
- name = "firehose_test_role"
-
- assume_role_policy = < **NOTE:** Kinesis Firehose is currently only supported in us-east-1, us-west-2 and eu-west-1.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A name to identify the stream. This is unique to the
-AWS account and region the Stream is created in.
-* `destination` – (Required) This is the destination to where the data is delivered. The only options are `s3`, `redshift`, and `elasticsearch`.
-* `s3_configuration` - (Required) Configuration options for the s3 destination (or the intermediate bucket if the destination
-is redshift). More details are given below.
-* `redshift_configuration` - (Optional) Configuration options if redshift is the destination.
-Using `redshift_configuration` requires the user to also specify a
-`s3_configuration` block. More details are given below.
-
-The `s3_configuration` object supports the following:
-
-* `role_arn` - (Required) The ARN of the AWS credentials.
-* `bucket_arn` - (Required) The ARN of the S3 bucket
-* `prefix` - (Optional) The "YYYY/MM/DD/HH" time format prefix is automatically used for delivered S3 files. You can specify an extra prefix to be added in front of the time format prefix. Note that if the prefix ends with a slash, it appears as a folder in the S3 bucket
-* `buffer_size` - (Optional) Buffer incoming data to the specified size, in MBs, before delivering it to the destination. The default value is 5.
- We recommend setting SizeInMBs to a value greater than the amount of data you typically ingest into the delivery stream in 10 seconds. For example, if you typically ingest data at 1 MB/sec set SizeInMBs to be 10 MB or higher.
-* `buffer_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds, before delivering it to the destination. The default value is 300.
-* `compression_format` - (Optional) The compression format. If no value is specified, the default is UNCOMPRESSED. Other supported values are GZIP, ZIP & Snappy. If the destination is redshift you cannot use ZIP or Snappy.
-* `kms_key_arn` - (Optional) Specifies the KMS key ARN the stream will use to encrypt data. If not set, no encryption will
-be used.
-* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below
-
-The `redshift_configuration` object supports the following:
-
-* `cluster_jdbcurl` - (Required) The jdbcurl of the redshift cluster.
-* `username` - (Required) The username that the firehose delivery stream will assume. It is strongly recommended that the username and password provided is used exclusively for Amazon Kinesis Firehose purposes, and that the permissions for the account are restricted for Amazon Redshift INSERT permissions.
-* `password` - (Required) The password for the username above.
-* `retry_duration` - (Optional) The length of time during which Firehose retries delivery after a failure, starting from the initial request and including the first attempt. The default value is 3600 seconds (60 minutes). Firehose does not retry if the value of DurationInSeconds is 0 (zero) or if the first delivery attempt takes longer than the current value.
-* `role_arn` - (Required) The arn of the role the stream assumes.
-* `data_table_name` - (Required) The name of the table in the redshift cluster that the s3 bucket will copy to.
-* `copy_options` - (Optional) Copy options for copying the data from the s3 intermediate bucket into redshift, for example to change the default delimiter. For valid values, see the [AWS documentation](http://docs.aws.amazon.com/firehose/latest/APIReference/API_CopyCommand.html)
-* `data_table_columns` - (Optional) The data table columns that will be targeted by the copy command.
-* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below
-
-The `elasticsearch_configuration` object supports the following:
-
-* `buffering_interval` - (Optional) Buffer incoming data for the specified period of time, in seconds between 60 to 900, before delivering it to the destination. The default value is 300s.
-* `buffering_size` - (Optional) Buffer incoming data to the specified size, in MBs between 1 to 100, before delivering it to the destination. The default value is 5MB.
-* `domain_arn` - (Required) The ARN of the Amazon ES domain. The IAM role must have permission for `DescribeElasticsearchDomain`, `DescribeElasticsearchDomains`, and `DescribeElasticsearchDomainConfig` after assuming `RoleARN`. The pattern needs to be `arn:.*`.
-* `index_name` - (Required) The Elasticsearch index name.
-* `index_rotation_period` - (Optional) The Elasticsearch index rotation period. Index rotation appends a timestamp to the IndexName to facilitate expiration of old data. Valid values are `NoRotation`, `OneHour`, `OneDay`, `OneWeek`, and `OneMonth`. The default value is `OneDay`.
-* `retry_duration` - (Optional) After an initial failure to deliver to Amazon Elasticsearch, the total amount of time, in seconds between 0 to 7200, during which Firehose re-attempts delivery (including the first attempt). After this time has elapsed, the failed documents are written to Amazon S3. The default value is 300s. There will be no retry if the value is 0.
-* `role_arn` - (Required) The ARN of the IAM role to be assumed by Firehose for calling the Amazon ES Configuration API and for indexing documents. The pattern needs to be `arn:.*`.
-* `s3_backup_mode` - (Optional) Defines how documents should be delivered to Amazon S3. Valid values are `FailedDocumentsOnly` and `AllDocuments`. Default value is `FailedDocumentsOnly`.
-* `type_name` - (Required) The Elasticsearch type name with maximum length of 100 characters.
-* `cloudwatch_logging_options` - (Optional) The CloudWatch Logging Options for the delivery stream. More details are given below
-
-The `cloudwatch_logging_options` object supports the following:
-
-* `enabled` - (Optional) Enables or disables the logging. Defaults to `false`.
-* `log_group_name` - (Optional) The CloudWatch group name for logging. This value is required if `enabled` is true.
-* `log_stream_name` - (Optional) The CloudWatch log stream name for logging. This value is required if `enabled` is true.
-
-## Attributes Reference
-
-* `arn` - The Amazon Resource Name (ARN) specifying the Stream
-
-[1]: https://aws.amazon.com/documentation/firehose/
diff --git a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown b/website/source/docs/providers/aws/r/kinesis_stream.html.markdown
deleted file mode 100644
index a840cc2c0..000000000
--- a/website/source/docs/providers/aws/r/kinesis_stream.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kinesis_stream"
-sidebar_current: "docs-aws-resource-kinesis-stream"
-description: |-
- Provides a AWS Kinesis Stream
----
-
-# aws\_kinesis\_stream
-
-Provides a Kinesis Stream resource. Amazon Kinesis is a managed service that
-scales elastically for real-time processing of streaming big data.
-
-For more details, see the [Amazon Kinesis Documentation][1].
-
-## Example Usage
-
-```hcl
-resource "aws_kinesis_stream" "test_stream" {
- name = "terraform-kinesis-test"
- shard_count = 1
- retention_period = 48
-
- shard_level_metrics = [
- "IncomingBytes",
- "OutgoingBytes",
- ]
-
- tags {
- Environment = "test"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A name to identify the stream. This is unique to the
-AWS account and region the Stream is created in.
-* `shard_count` – (Required) The number of shards that the stream will use.
-Amazon has guidlines for specifying the Stream size that should be referenced
-when creating a Kinesis stream. See [Amazon Kinesis Streams][2] for more.
-* `retention_period` - (Optional) Length of time data records are accessible after they are added to the stream. The maximum value of a stream's retention period is 168 hours. Minimum value is 24. Default is 24.
-* `shard_level_metrics` - (Optional) A list of shard-level CloudWatch metrics which can be enabled for the stream. See [Monitoring with CloudWatch][3] for more. Note that the value ALL should not be used; instead you should provide an explicit list of metrics you wish to enable.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-* `id` - The unique Stream id
-* `name` - The unique Stream name
-* `shard_count` - The count of Shards for this Stream
-* `arn` - The Amazon Resource Name (ARN) specifying the Stream (same as `id`)
-
-
-## Import
-
-Kinesis Streams can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_kinesis_stream.test_stream terraform-kinesis-test
-```
-
-[1]: https://aws.amazon.com/documentation/kinesis/
-[2]: https://docs.aws.amazon.com/kinesis/latest/dev/amazon-kinesis-streams.html
-[3]: https://docs.aws.amazon.com/streams/latest/dev/monitoring-with-cloudwatch.html
-
diff --git a/website/source/docs/providers/aws/r/kms_alias.html.markdown b/website/source/docs/providers/aws/r/kms_alias.html.markdown
deleted file mode 100644
index 8ead81e08..000000000
--- a/website/source/docs/providers/aws/r/kms_alias.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kms_alias"
-sidebar_current: "docs-aws-resource-kms-alias"
-description: |-
- Provides a display name for a customer master key.
----
-
-# aws\_kms\_alias
-
-Provides an alias for a KMS customer master key. AWS Console enforces 1-to-1 mapping between aliases & keys,
-but API (hence Terraform too) allows you to create as many aliases as
-the [account limits](http://docs.aws.amazon.com/kms/latest/developerguide/limits.html) allow you.
-
-## Example Usage
-
-```hcl
-resource "aws_kms_key" "a" {}
-
-resource "aws_kms_alias" "a" {
- name = "alias/my-key-alias"
- target_key_id = "${aws_kms_key.a.key_id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-
-* `name` - (Optional) The display name of the alias. The name must start with the word "alias" followed by a forward slash (alias/)
-* `name_prefix` - (Optional) Creates an unique alias beginning with the specified prefix.
-The name must start with the word "alias" followed by a forward slash (alias/). Conflicts with `name`.
-* `target_key_id` - (Required) Identifier for the key for which the alias is for, can be either an ARN or key_id.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) of the key alias.
-
-## Import
-
-KMS aliases can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_kms_alias.a alias/my-key-alias
-```
diff --git a/website/source/docs/providers/aws/r/kms_key.html.markdown b/website/source/docs/providers/aws/r/kms_key.html.markdown
deleted file mode 100644
index 2a7f014e1..000000000
--- a/website/source/docs/providers/aws/r/kms_key.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_kms_key"
-sidebar_current: "docs-aws-resource-kms-key"
-description: |-
- Provides a KMS customer master key.
----
-
-# aws\_kms\_key
-
-Provides a KMS customer master key.
-
-## Example Usage
-
-```hcl
-resource "aws_kms_key" "a" {
- description = "KMS key 1"
- deletion_window_in_days = 10
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `description` - (Optional) The description of the key as viewed in AWS console.
-* `key_usage` - (Optional) Specifies the intended use of the key.
- Defaults to ENCRYPT/DECRYPT, and only symmetric encryption and decryption are supported.
-* `policy` - (Optional) A valid policy JSON document.
-* `deletion_window_in_days` - (Optional) Duration in days after which the key is deleted
- after destruction of the resource, must be between 7 and 30 days. Defaults to 30 days.
-* `is_enabled` - (Optional) Specifies whether the key is enabled. Defaults to true.
-* `enable_key_rotation` - (Optional) Specifies whether [key rotation](http://docs.aws.amazon.com/kms/latest/developerguide/rotate-keys.html)
- is enabled. Defaults to false.
-* `tags` - (Optional) A mapping of tags to assign to the object.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The Amazon Resource Name (ARN) of the key.
-* `key_id` - The globally unique identifier for the key.
-
-## Import
-
-KMS Keys can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_kms_key.a arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/lambda_alias.html.markdown b/website/source/docs/providers/aws/r/lambda_alias.html.markdown
deleted file mode 100644
index dac242fa0..000000000
--- a/website/source/docs/providers/aws/r/lambda_alias.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lambda_alias"
-sidebar_current: "docs-aws-resource-lambda-alias"
-description: |-
- Creates a Lambda function alias.
----
-
-# aws\_lambda\_alias
-
-Creates a Lambda function alias. Creates an alias that points to the specified Lambda function version.
-
-For information about Lambda and how to use it, see [What is AWS Lambda?][1]
-For information about function aliases, see [CreateAlias][2] in the API docs.
-
-## Example Usage
-
-```hcl
-resource "aws_lambda_alias" "test_alias" {
- name = "testalias"
- description = "a sample description"
- function_name = "${aws_lambda_function.lambda_function_test.arn}"
- function_version = "$LATEST"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) Name for the alias you are creating. Pattern: `(?!^[0-9]+$)([a-zA-Z0-9-_]+)`
-* `description` - (Optional) Description of the alias.
-* `function_name` - (Required) The function ARN of the Lambda function for which you want to create an alias.
-* `function_version` - (Required) Lambda function version for which you are creating the alias. Pattern: `(\$LATEST|[0-9]+)`.
-
-## Attributes Reference
-
-* `arn` - The Amazon Resource Name (ARN) identifying your Lambda function alias.
-
-[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html
-[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateAlias.html
diff --git a/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown b/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown
deleted file mode 100644
index 6ef9c1aa0..000000000
--- a/website/source/docs/providers/aws/r/lambda_event_source_mapping.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lambda_event_source_mapping"
-sidebar_current: "docs-aws-resource-lambda-event-source-mapping"
-description: |-
- Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis and DynamoDB.
----
-
-# aws\_lambda\_event\_source\_mapping
-
-Provides a Lambda event source mapping. This allows Lambda functions to get events from Kinesis and DynamoDB.
-
-For information about Lambda and how to use it, see [What is AWS Lambda?][1]
-For information about event source mappings, see [CreateEventSourceMapping][2] in the API docs.
-
-## Example Usage
-
-```hcl
-resource "aws_lambda_event_source_mapping" "event_source_mapping" {
- batch_size = 100
- event_source_arn = "arn:aws:kinesis:REGION:123456789012:stream/stream_name"
- enabled = true
- function_name = "arn:aws:lambda:REGION:123456789012:function:function_name"
- starting_position = "TRIM_HORIZON|LATEST"
-}
-```
-
-## Argument Reference
-
-* `batch_size` - (Optional) The largest number of records that Lambda will retrieve from your event source at the time of invocation. Defaults to `100`.
-* `event_source_arn` - (Required) The event source ARN - can either be a Kinesis or DynamoDB stream.
-* `enabled` - (Optional) Determines if the mapping will be enabled on creation. Defaults to `true`.
-* `function_name` - (Required) The name or the ARN of the Lambda function that will be subscribing to events.
-* `starting_position` - (Required) The position in the stream where AWS Lambda should start reading. Can be one of either `TRIM_HORIZON` or `LATEST`.
-
-## Attributes Reference
-
-* `function_arn` - The the ARN of the Lambda function the event source mapping is sending events to. (Note: this is a computed value that differs from `function_name` above.)
-* `last_modified` - The date this resource was last modified.
-* `last_processing_result` - The result of the last AWS Lambda invocation of your Lambda function.
-* `state` - The state of the event source mapping.
-* `state_transition_reason` - The reason the event source mapping is in its current state.
-* `uuid` - The UUID of the created event source mapping.
-
-
-[1]: http://docs.aws.amazon.com/lambda/latest/dg/welcome.html
-[2]: http://docs.aws.amazon.com/lambda/latest/dg/API_CreateEventSourceMapping.html
-
-
-## Import
-
-Lambda Event Source Mappings can be imported using the `UUID` (event source mapping identifier), e.g.
-
-```
-$ terraform import aws_lambda_event_source_mapping.event_source_mapping 12345kxodurf3443
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/lambda_function.html.markdown b/website/source/docs/providers/aws/r/lambda_function.html.markdown
deleted file mode 100644
index 98ca76a38..000000000
--- a/website/source/docs/providers/aws/r/lambda_function.html.markdown
+++ /dev/null
@@ -1,141 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lambda_function"
-sidebar_current: "docs-aws-resource-lambda-function"
-description: |-
- Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS. The Lambda Function itself includes source code and runtime configuration.
----
-
-# aws\_lambda\_function
-
-Provides a Lambda Function resource. Lambda allows you to trigger execution of code in response to events in AWS. The Lambda Function itself includes source code and runtime configuration.
-
-For information about Lambda and how to use it, see [What is AWS Lambda?][1]
-
-## Example Usage
-
-```hcl
-resource "aws_iam_role" "iam_for_lambda" {
- name = "iam_for_lambda"
-
- assume_role_policy = < **NOTE:** if both `subnet_ids` and `security_group_ids` are empty then vpc_config is considered to be empty or unset.
-
-For **environment** the following attributes are supported:
-
-* `variables` - (Optional) A map that defines environment variables for the Lambda function.
-
-## Attributes Reference
-
-* `arn` - The Amazon Resource Name (ARN) identifying your Lambda Function.
-* `qualified_arn` - The Amazon Resource Name (ARN) identifying your Lambda Function Version
- (if versioning is enabled via `publish = true`).
-* `invoke_arn` - The ARN to be used for invoking Lambda Function from API Gateway - to be used in [`aws_api_gateway_integration`](/docs/providers/aws/r/api_gateway_integration.html)'s `uri`
-* `version` - Latest published version of your Lambda Function.
-* `last_modified` - The date this resource was last modified.
-* `kms_key_arn` - (Optional) The ARN for the KMS encryption key.
-* `source_code_hash` - Base64-encoded representation of raw SHA-256 sum of the zip file
- provided either via `filename` or `s3_*` parameters.
-
-[1]: https://docs.aws.amazon.com/lambda/latest/dg/welcome.html
-[2]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-s3-events-adminuser-create-test-function-create-function.html
-[3]: https://docs.aws.amazon.com/lambda/latest/dg/walkthrough-custom-events-create-test-function.html
-[4]: https://docs.aws.amazon.com/lambda/latest/dg/intro-permission-model.html
-[5]: https://docs.aws.amazon.com/lambda/latest/dg/limits.html
-[6]: https://docs.aws.amazon.com/lambda/latest/dg/API_CreateFunction.html#SSS-CreateFunction-request-Runtime
-[7]: http://docs.aws.amazon.com/lambda/latest/dg/vpc.html
-[8]: https://docs.aws.amazon.com/lambda/latest/dg/deployment-package-v2.html
-
-## Import
-
-Lambda Functions can be imported using the `function_name`, e.g.
-
-```
-$ terraform import aws_lambda_function.test_lambda my_test_lambda_function
-```
diff --git a/website/source/docs/providers/aws/r/lambda_permission.html.markdown b/website/source/docs/providers/aws/r/lambda_permission.html.markdown
deleted file mode 100644
index 9c962ec29..000000000
--- a/website/source/docs/providers/aws/r/lambda_permission.html.markdown
+++ /dev/null
@@ -1,130 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lambda_permission"
-sidebar_current: "docs-aws-resource-lambda-permission"
-description: |-
- Creates a Lambda function permission.
----
-
-# aws\_lambda\_permission
-
-Creates a Lambda permission to allow external sources invoking the Lambda function
-(e.g. CloudWatch Event Rule, SNS or S3).
-
-## Example Usage
-
-```hcl
-resource "aws_lambda_permission" "allow_cloudwatch" {
- statement_id = "AllowExecutionFromCloudWatch"
- action = "lambda:InvokeFunction"
- function_name = "${aws_lambda_function.test_lambda.function_name}"
- principal = "events.amazonaws.com"
- source_account = "111122223333"
- source_arn = "arn:aws:events:eu-west-1:111122223333:rule/RunDaily"
- qualifier = "${aws_lambda_alias.test_alias.name}"
-}
-
-resource "aws_lambda_alias" "test_alias" {
- name = "testalias"
- description = "a sample description"
- function_name = "${aws_lambda_function.test_lambda.function_name}"
- function_version = "$LATEST"
-}
-
-resource "aws_lambda_function" "test_lambda" {
- filename = "lambdatest.zip"
- function_name = "lambda_function_name"
- role = "${aws_iam_role.iam_for_lambda.arn}"
- handler = "exports.handler"
- runtime = "nodejs6.10"
-}
-
-resource "aws_iam_role" "iam_for_lambda" {
- name = "iam_for_lambda"
-
- assume_role_policy = < **NOTE:** Changes to `*_block_device` configuration of _existing_ resources
-cannot currently be detected by Terraform. After updating to block device
-configuration, resource recreation can be manually triggered by using the
-[`taint` command](/docs/commands/taint.html).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the launch configuration.
-* `name` - The name of the launch configuration.
-
-[1]: /docs/providers/aws/r/autoscaling_group.html
-[2]: /docs/configuration/resources.html#lifecycle
-[3]: /docs/providers/aws/r/spot_instance_request.html
-
-## Import
-
-Launch configurations can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_launch_configuration.as_conf terraform-lg-123456
-```
diff --git a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown b/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown
deleted file mode 100644
index 08914033f..000000000
--- a/website/source/docs/providers/aws/r/lb_cookie_stickiness_policy.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lb_cookie_stickiness_policy"
-sidebar_current: "docs-aws-resource-lb-cookie-stickiness-policy"
-description: |-
- Provides a load balancer cookie stickiness policy, which allows an ELB to control the sticky session lifetime of the browser.
----
-
-# aws\_lb\_cookie\_stickiness\_policy
-
-Provides a load balancer cookie stickiness policy, which allows an ELB to control the sticky session lifetime of the browser.
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "lb" {
- name = "test-lb"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 8000
- instance_protocol = "http"
- lb_port = 80
- lb_protocol = "http"
- }
-}
-
-resource "aws_lb_cookie_stickiness_policy" "foo" {
- name = "foo-policy"
- load_balancer = "${aws_elb.lb.id}"
- lb_port = 80
- cookie_expiration_period = 600
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the stickiness policy.
-* `load_balancer` - (Required) The load balancer to which the policy
- should be attached.
-* `lb_port` - (Required) The load balancer port to which the policy
- should be applied. This must be an active listener on the load
-balancer.
-* `cookie_expiration_period` - (Optional) The time period after which
- the session cookie should be considered stale, expressed in seconds.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `name` - The name of the stickiness policy.
-* `load_balancer` - The load balancer to which the policy is attached.
-* `lb_port` - The load balancer port to which the policy is applied.
-* `cookie_expiration_period` - The time period after which the session cookie is considered stale, expressed in seconds.
diff --git a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown b/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown
deleted file mode 100644
index 92a02179e..000000000
--- a/website/source/docs/providers/aws/r/lb_ssl_negotiation_policy.html.markdown
+++ /dev/null
@@ -1,97 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lb_ssl_negotiation_policy"
-sidebar_current: "docs-aws-resource-lb-ssl-negotiation-policy"
-description: |-
- Provides a load balancer SSL negotiation policy, which allows an ELB to control which ciphers and protocols are supported during SSL negotiations between a client and a load balancer.
----
-
-# aws\_lb\_ssl\_negotiation\_policy
-
-Provides a load balancer SSL negotiation policy, which allows an ELB to control the ciphers and protocols that are supported during SSL negotiations between a client and a load balancer.
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "lb" {
- name = "test-lb"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 8000
- instance_protocol = "https"
- lb_port = 443
- lb_protocol = "https"
- ssl_certificate_id = "arn:aws:iam::123456789012:server-certificate/certName"
- }
-}
-
-resource "aws_lb_ssl_negotiation_policy" "foo" {
- name = "foo-policy"
- load_balancer = "${aws_elb.lb.id}"
- lb_port = 443
-
- attribute {
- name = "Protocol-TLSv1"
- value = "false"
- }
-
- attribute {
- name = "Protocol-TLSv1.1"
- value = "false"
- }
-
- attribute {
- name = "Protocol-TLSv1.2"
- value = "true"
- }
-
- attribute {
- name = "Server-Defined-Cipher-Order"
- value = "true"
- }
-
- attribute {
- name = "ECDHE-RSA-AES128-GCM-SHA256"
- value = "true"
- }
-
- attribute {
- name = "AES128-GCM-SHA256"
- value = "true"
- }
-
- attribute {
- name = "EDH-RSA-DES-CBC3-SHA"
- value = "false"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the SSL negotiation policy.
-* `load_balancer` - (Required) The load balancer to which the policy
- should be attached.
-* `lb_port` - (Required) The load balancer port to which the policy
- should be applied. This must be an active listener on the load
-balancer.
-* `attribute` - (Optional) An SSL Negotiation policy attribute. Each has two properties:
- * `name` - The name of the attribute
- * `value` - The value of the attribute
-
-To set your attributes, please see the [AWS Elastic Load Balancing Developer Guide](http://docs.aws.amazon.com/ElasticLoadBalancing/latest/DeveloperGuide/elb-security-policy-table.html) for a listing of the supported SSL protocols, SSL options, and SSL ciphers.
-
-~> **NOTE:** The AWS documentation references Server Order Preference, which the AWS Elastic Load Balancing API refers to as `Server-Defined-Cipher-Order`. If you wish to set Server Order Preference, use this value instead.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `name` - The name of the stickiness policy.
-* `load_balancer` - The load balancer to which the policy is attached.
-* `lb_port` - The load balancer port to which the policy is applied.
-* `attribute` - The SSL Negotiation policy attributes.
diff --git a/website/source/docs/providers/aws/r/lightsail_domain.html.markdown b/website/source/docs/providers/aws/r/lightsail_domain.html.markdown
deleted file mode 100644
index 49577f5fb..000000000
--- a/website/source/docs/providers/aws/r/lightsail_domain.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lightsail_domain"
-sidebar_current: "docs-aws-resource-lightsail-domain"
-description: |-
- Provides an Lightsail Domain
----
-
-# aws\_lightsail\_domain
-
-Creates a domain resource for the specified domain (e.g., example.com).
-You cannot register a new domain name using Lightsail. You must register
-a domain name using Amazon Route 53 or another domain name registrar.
-If you have already registered your domain, you can enter its name in
-this parameter to manage the DNS records for that domain.
-
-~> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
-
-## Example Usage, creating a new domain
-
-```hcl
-resource "aws_lightsail_domain" "domain_test" {
- domain_name = "mydomain.com"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain_name` - (Required) The name of the Lightsail domain to manage
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The name used for this domain
-* `arn` - The ARN of the Lightsail domain
diff --git a/website/source/docs/providers/aws/r/lightsail_instance.html.markdown b/website/source/docs/providers/aws/r/lightsail_instance.html.markdown
deleted file mode 100644
index b6995b4db..000000000
--- a/website/source/docs/providers/aws/r/lightsail_instance.html.markdown
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lightsail_instance"
-sidebar_current: "docs-aws-resource-lightsail-instance"
-description: |-
- Provides an Lightsail Instance
----
-
-# aws\_lightsail\_instance
-
-Provides a Lightsail Instance. Amazon Lightsail is a service to provide easy virtual private servers
-with custom software already setup. See [What is Amazon Lightsail?](https://lightsail.aws.amazon.com/ls/docs/getting-started/article/what-is-amazon-lightsail)
-for more information.
-
-~> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
-
-## Example Usage
-
-```hcl
-# Create a new GitLab Lightsail Instance
-resource "aws_lightsail_instance" "gitlab_test" {
- name = "custom gitlab"
- availability_zone = "us-east-1b"
- blueprint_id = "string"
- bundle_id = "string"
- key_pair_name = "some_key_name"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Lightsail Instance
-* `availability_zone` - (Required) The Availability Zone in which to create your
-instance. At this time, must be in `us-east-1`, `us-east-2`, `us-west-2`, `eu-west-1`, `eu-west-2`, `eu-central-1` regions
-* `blueprint_id` - (Required) The ID for a virtual private server image
-(see list below)
-* `bundle_id` - (Required) The bundle of specification information (see list below)
-* `key_pair_name` - (Required) The name of your key pair. Created in the
-Lightsail console (cannot use `aws_key_pair` at this time)
-* `user_data` - (Optional) launch script to configure server with additional user data
-
-
-## Blueprints
-
-Lightsail currently supports the following Blueprint IDs:
-
-- `amazon_linux_2016_09_0`
-- `ubuntu_16_04`
-- `wordpress_4_6_1`
-- `lamp_5_6_27`
-- `nodejs_6_9_1`
-- `joomla_3_6_3`
-- `magento_2_1_2`
-- `mean_3_2_10`
-- `drupal_8_2_1`
-- `gitlab_8_12_6`
-- `redmine_3_3_1`
-- `nginx_1_10_2`
-
-## Bundles
-
-Lightsail currently supports the following Bundle IDs:
-
-- `nano_1_0`
-- `micro_1_0`
-- `small_1_0`
-- `medium_1_0`
-- `large_1_0`
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The ARN of the Lightsail instance (matches `arn`).
-* `arn` - The ARN of the Lightsail instance (matches `id`).
-* `availability_zone`
-* `blueprint_id`
-* `bundle_id`
-* `key_pair_name`
-* `user_data`
-
-## Import
-
-Lightsail Instances can be imported using their ARN, e.g.
-
-```
-$ terraform import aws_lightsail_instance.bar
-```
diff --git a/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown b/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown
deleted file mode 100644
index 935e874e1..000000000
--- a/website/source/docs/providers/aws/r/lightsail_key_pair.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lightsail_key_pair"
-sidebar_current: "docs-aws-resource-lightsail-key-pair"
-description: |-
- Provides an Lightsail Key Pair
----
-
-# aws\_lightsail\_key\_pair
-
-Provides a Lightsail Key Pair, for use with Lightsail Instances. These key pairs
-are seperate from EC2 Key Pairs, and must be created or imported for use with
-Lightsail.
-
-~> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
-
-## Example Usage, creating a new Key Pair
-
-```hcl
-# Create a new Lightsail Key Pair
-resource "aws_lightsail_key_pair" "lg_key_pair" {
- name = "lg_key_pair"
-}
-```
-
-## Create new Key Pair, encrypting the private key with a PGP Key
-
-```hcl
-resource "aws_lightsail_key_pair" "lg_key_pair" {
- name = "lg_key_pair"
- pgp_key = "keybase:keybaseusername"
-}
-```
-
-## Import an existing public key
-
-```hcl
-resource "aws_lightsail_key_pair" "lg_key_pair" {
- name = "importing"
- public_key = "${file("~/.ssh/id_rsa.pub")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the Lightsail Key Pair. If omitted, a unique
-name will be generated by Terraform
-* `pgp_key` – (Optional) An optional PGP key to encrypt the resulting private
-key material. Only used when creating a new key pair
-* `public_key` - (Required) The public key material. This public key will be
-imported into Lightsail
-
-~> **NOTE:** a PGP key is not required, however it is strongly encouraged.
-Without a PGP key, the private key material will be stored in state unencrypted.
-`pgp_key` is ignored if `public_key` is supplied.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The name used for this key pair
-* `arn` - The ARN of the Lightsail key pair
-* `fingerprint` - The MD5 public key fingerprint as specified in section 4 of RFC 4716.
-* `public_key` - the public key, base64 encoded
-* `private_key` - the private key, base64 encoded. This is only populated
-when creating a new key, and when no `pgp_key` is provided
-* `encrypted_private_key` – the private key material, base 64 encoded and
-encrypted with the given `pgp_key`. This is only populated when creating a new
-key and `pgp_key` is supplied
-* `encrypted_fingerprint` - The MD5 public key fingerprint for the encrypted
-private key
-
-## Import
-
-Lightsail Key Pairs cannot be imported, because the private and public key are
-only available on initial creation.
diff --git a/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown b/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown
deleted file mode 100644
index 8410f3f1c..000000000
--- a/website/source/docs/providers/aws/r/lightsail_static_ip.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lightsail_static_ip"
-sidebar_current: "docs-aws-resource-lightsail-static-ip"
-description: |-
- Provides an Lightsail Static IP
----
-
-# aws\_lightsail\_static\_ip
-
-Allocates a static IP address.
-
-~> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
-
-## Example Usage
-
-```hcl
-resource "aws_lightsail_static_ip" "test" {
- name = "example"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name for the allocated static IP
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `arn` - The ARN of the Lightsail static IP
-* `ip_address` - The allocated static IP address
-* `support_code` - The support code.
diff --git a/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown b/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown
deleted file mode 100644
index 7611c93ad..000000000
--- a/website/source/docs/providers/aws/r/lightsail_static_ip_attachment.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_lightsail_static_ip_attachment"
-sidebar_current: "docs-aws-resource-lightsail-static-ip-attachment"
-description: |-
- Provides an Lightsail Static IP Attachment
----
-
-# aws\_lightsail\_static\_ip\_attachment
-
-Provides a static IP address attachment - relationship between a Lightsail static IP & Lightsail instance.
-
-~> **Note:** Lightsail is currently only supported in a limited number of AWS Regions, please see ["Regions and Availability Zones in Amazon Lightsail"](https://lightsail.aws.amazon.com/ls/docs/overview/article/understanding-regions-and-availability-zones-in-amazon-lightsail) for more details
-
-## Example Usage
-
-```hcl
-resource "aws_lightsail_static_ip_attachment" "test" {
- static_ip_name = "${aws_lightsail_static_ip.test.name}"
- instance_name = "${aws_lightsail_instance.test.name}"
-}
-
-resource "aws_lightsail_static_ip" "test" {
- name = "example"
-}
-
-resource "aws_lightsail_instance" "test" {
- name = "example"
- availability_zone = "us-east-1b"
- blueprint_id = "string"
- bundle_id = "string"
- key_pair_name = "some_key_name"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `static_ip_name` - (Required) The name of the allocated static IP
-* `instance_name` - (Required) The name of the Lightsail instance to attach the IP to
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `arn` - The ARN of the Lightsail static IP
-* `ip_address` - The allocated static IP address
-* `support_code` - The support code.
diff --git a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown
deleted file mode 100644
index 2b4a8c3fe..000000000
--- a/website/source/docs/providers/aws/r/load_balancer_backend_server_policy.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_load_balancer_backend_server_policy"
-sidebar_current: "docs-aws-resource-load-balancer-backend-server-policy"
-description: |-
- Attaches a load balancer policy to an ELB backend server.
----
-
-# aws\_elb\_load\_balancer\_backend\_server\_policy
-
-Attaches a load balancer policy to an ELB backend server.
-
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "wu-tang" {
- name = "wu-tang"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 443
- instance_protocol = "http"
- lb_port = 443
- lb_protocol = "https"
- ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net"
- }
-
- tags {
- Name = "wu-tang"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-ca-pubkey-policy" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-ca-pubkey-policy"
- policy_type_name = "PublicKeyPolicyType"
-
- policy_attribute = {
- name = "PublicKey"
- value = "${file("wu-tang-pubkey")}"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-root-ca-backend-auth-policy"
- policy_type_name = "BackendServerAuthenticationPolicyType"
-
- policy_attribute = {
- name = "PublicKeyPolicyName"
- value = "${aws_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}"
- }
-}
-
-resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- instance_port = 443
-
- policy_names = [
- "${aws_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}",
- ]
-}
-```
-
-Where the file `pubkey` in the current directory contains only the _public key_ of the certificate.
-
-```shell
-cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey
-```
-
-This example shows how to enable backend authentication for an ELB as well as customize the TLS settings.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `load_balancer_name` - (Required) The load balancer to attach the policy to.
-* `policy_names` - (Required) List of Policy Names to apply to the backend server.
-* `instance_port` - (Required) The instance port to apply the policy to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `load_balancer_name` - The load balancer on which the policy is defined.
-* `instance_port` - The backend port the policies are applied to
diff --git a/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown
deleted file mode 100644
index 9ef19e262..000000000
--- a/website/source/docs/providers/aws/r/load_balancer_listener_policy.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_load_balancer_listener_policy"
-sidebar_current: "docs-aws-resource-load-balancer-listener-policy"
-description: |-
- Attaches a load balancer policy to an ELB Listener.
----
-
-# aws\_elb\_load\_balancer\_listener\_policy
-
-Attaches a load balancer policy to an ELB Listener.
-
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "wu-tang" {
- name = "wu-tang"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 443
- instance_protocol = "http"
- lb_port = 443
- lb_protocol = "https"
- ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net"
- }
-
- tags {
- Name = "wu-tang"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-ssl" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-ssl"
- policy_type_name = "SSLNegotiationPolicyType"
-
- policy_attribute = {
- name = "ECDHE-ECDSA-AES128-GCM-SHA256"
- value = "true"
- }
-
- policy_attribute = {
- name = "Protocol-TLSv1.2"
- value = "true"
- }
-}
-
-resource "aws_load_balancer_listener_policy" "wu-tang-listener-policies-443" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- load_balancer_port = 443
-
- policy_names = [
- "${aws_load_balancer_policy.wu-tang-ssl.policy_name}",
- ]
-}
-```
-
-This example shows how to customize the TLS settings of an HTTPS listener.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `load_balancer_name` - (Required) The load balancer to attach the policy to.
-* `load_balancer_port` - (Required) The load balancer listener port to apply the policy to.
-* `policy_names` - (Required) List of Policy Names to apply to the backend server.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `load_balancer_name` - The load balancer on which the policy is defined.
-* `load_balancer_port` - The load balancer listener port the policies are applied to
diff --git a/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown b/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown
deleted file mode 100644
index 178fcfb41..000000000
--- a/website/source/docs/providers/aws/r/load_balancer_policy.html.markdown
+++ /dev/null
@@ -1,114 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_load_balancer_policy"
-sidebar_current: "docs-aws-resource-load-balancer-policy"
-description: |-
- Provides a load balancer policy, which can be attached to an ELB listener or backend server.
----
-
-# aws\_elb\_load\_balancer\_policy
-
-Provides a load balancer policy, which can be attached to an ELB listener or backend server.
-
-## Example Usage
-
-```hcl
-resource "aws_elb" "wu-tang" {
- name = "wu-tang"
- availability_zones = ["us-east-1a"]
-
- listener {
- instance_port = 443
- instance_protocol = "http"
- lb_port = 443
- lb_protocol = "https"
- ssl_certificate_id = "arn:aws:iam::000000000000:server-certificate/wu-tang.net"
- }
-
- tags {
- Name = "wu-tang"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-ca-pubkey-policy" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-ca-pubkey-policy"
- policy_type_name = "PublicKeyPolicyType"
-
- policy_attribute = {
- name = "PublicKey"
- value = "${file("wu-tang-pubkey")}"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-root-ca-backend-auth-policy" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-root-ca-backend-auth-policy"
- policy_type_name = "BackendServerAuthenticationPolicyType"
-
- policy_attribute = {
- name = "PublicKeyPolicyName"
- value = "${aws_load_balancer_policy.wu-tang-root-ca-pubkey-policy.policy_name}"
- }
-}
-
-resource "aws_load_balancer_policy" "wu-tang-ssl" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- policy_name = "wu-tang-ssl"
- policy_type_name = "SSLNegotiationPolicyType"
-
- policy_attribute = {
- name = "ECDHE-ECDSA-AES128-GCM-SHA256"
- value = "true"
- }
-
- policy_attribute = {
- name = "Protocol-TLSv1.2"
- value = "true"
- }
-}
-
-resource "aws_load_balancer_backend_server_policy" "wu-tang-backend-auth-policies-443" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- instance_port = 443
-
- policy_names = [
- "${aws_load_balancer_policy.wu-tang-root-ca-backend-auth-policy.policy_name}",
- ]
-}
-
-resource "aws_load_balancer_listener_policy" "wu-tang-listener-policies-443" {
- load_balancer_name = "${aws_elb.wu-tang.name}"
- load_balancer_port = 443
-
- policy_names = [
- "${aws_load_balancer_policy.wu-tang-ssl.policy_name}",
- ]
-}
-```
-
-Where the file `pubkey` in the current directory contains only the _public key_ of the certificate.
-
-```shell
-cat wu-tang-ca.pem | openssl x509 -pubkey -noout | grep -v '\-\-\-\-' | tr -d '\n' > wu-tang-pubkey
-```
-
-This example shows how to enable backend authentication for an ELB as well as customize the TLS settings.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `load_balancer_name` - (Required) The load balancer on which the policy is defined.
-* `policy_name` - (Required) The name of the load balancer policy.
-* `policy_type_name` - (Required) The policy type.
-* `policy_attribute` - (Optional) Policy attribute to apply to the policy.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the policy.
-* `policy_name` - The name of the stickiness policy.
-* `policy_type_name` - The policy type of the policy.
-* `load_balancer_name` - The load balancer on which the policy is defined.
diff --git a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown b/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown
deleted file mode 100644
index 2551ad738..000000000
--- a/website/source/docs/providers/aws/r/main_route_table_assoc.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_main_route_table_association"
-sidebar_current: "docs-aws-resource-main-route-table-assoc"
-description: |-
- Provides a resource for managing the main routing table of a VPC.
----
-
-# aws\_main\_route\_table\_association
-
-Provides a resource for managing the main routing table of a VPC.
-
-## Example Usage
-
-```hcl
-resource "aws_main_route_table_association" "a" {
- vpc_id = "${aws_vpc.foo.id}"
- route_table_id = "${aws_route_table.bar.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the VPC whose main route table should be set
-* `route_table_id` - (Required) The ID of the Route Table to set as the new
- main route table for the target VPC
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Route Table Association
-* `original_route_table_id` - Used internally, see __Notes__ below
-
-## Notes
-
-On VPC creation, the AWS API always creates an initial Main Route Table. This
-resource records the ID of that Route Table under `original_route_table_id`.
-The "Delete" action for a `main_route_table_association` consists of resetting
-this original table as the Main Route Table for the VPC. You'll see this
-additional Route Table in the AWS console; it must remain intact in order for
-the `main_route_table_association` delete to work properly.
diff --git a/website/source/docs/providers/aws/r/nat_gateway.html.markdown b/website/source/docs/providers/aws/r/nat_gateway.html.markdown
deleted file mode 100644
index b5a397b25..000000000
--- a/website/source/docs/providers/aws/r/nat_gateway.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_nat_gateway"
-sidebar_current: "docs-aws-resource-nat-gateway"
-description: |-
- Provides a resource to create a VPC NAT Gateway.
----
-
-# aws\_nat\_gateway
-
-Provides a resource to create a VPC NAT Gateway.
-
-## Example Usage
-
-```hcl
-resource "aws_nat_gateway" "gw" {
- allocation_id = "${aws_eip.nat.id}"
- subnet_id = "${aws_subnet.public.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allocation_id` - (Required) The Allocation ID of the Elastic IP address for the gateway.
-* `subnet_id` - (Required) The Subnet ID of the subnet in which to place the gateway.
-
--> **Note:** It's recommended to denote that the NAT Gateway depends on the Internet Gateway for the VPC in which the NAT Gateway's subnet is located. For example:
-
- resource "aws_internet_gateway" "gw" {
- vpc_id = "${aws_vpc.main.id}"
- }
-
- resource "aws_nat_gateway" "gw" {
- //other arguments
-
- depends_on = ["aws_internet_gateway.gw"]
- }
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the NAT Gateway.
-* `allocation_id` - The Allocation ID of the Elastic IP address for the gateway.
-* `subnet_id` - The Subnet ID of the subnet in which the NAT gateway is placed.
-* `network_interface_id` - The ENI ID of the network interface created by the NAT gateway.
-* `private_ip` - The private IP address of the NAT Gateway.
-* `public_ip` - The public IP address of the NAT Gateway.
-
-## Import
-
-NAT Gateways can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_nat_gateway.private_gw nat-05dba92075d71c408
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/network_acl.html.markdown b/website/source/docs/providers/aws/r/network_acl.html.markdown
deleted file mode 100644
index adb73dcc6..000000000
--- a/website/source/docs/providers/aws/r/network_acl.html.markdown
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_network_acl"
-sidebar_current: "docs-aws-resource-network-acl"
-description: |-
- Provides a network ACL resource.
----
-
-# aws\_network\_acl
-
-Provides a network ACL resource. You might set up network ACLs with rules similar
-to your security groups in order to add an additional layer of security to your VPC.
-
-## Example Usage
-
-```hcl
-resource "aws_network_acl" "main" {
- vpc_id = "${aws_vpc.main.id}"
-
- egress {
- protocol = "tcp"
- rule_no = 2
- action = "allow"
- cidr_block = "10.3.0.0/18"
- from_port = 443
- to_port = 443
- }
-
- ingress {
- protocol = "tcp"
- rule_no = 1
- action = "allow"
- cidr_block = "10.3.0.0/18"
- from_port = 80
- to_port = 80
- }
-
- tags {
- Name = "main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the associated VPC.
-* `subnet_ids` - (Optional) A list of Subnet IDs to apply the ACL to
-* `subnet_id` - (Optional, Deprecated) The ID of the associated Subnet. This
-attribute is deprecated, please use the `subnet_ids` attribute instead
-* `ingress` - (Optional) Specifies an ingress rule. Parameters defined below.
-* `egress` - (Optional) Specifies an egress rule. Parameters defined below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Both `egress` and `ingress` support the following keys:
-
-* `from_port` - (Required) The from port to match.
-* `to_port` - (Required) The to port to match.
-* `rule_no` - (Required) The rule number. Used for ordering.
-* `action` - (Required) The action to take.
-* `protocol` - (Required) The protocol to match. If using the -1 'all'
-protocol, you must specify a from and to port of 0.
-* `cidr_block` - (Optional) The CIDR block to match. This must be a
-valid network mask.
-* `ipv6_cidr_block` - (Optional) The IPv6 CIDR block.
-* `icmp_type` - (Optional) The ICMP type to be used. Default 0.
-* `icmp_code` - (Optional) The ICMP type code to be used. Default 0.
-
-~> Note: For more information on ICMP types and codes, see here: http://www.nthelp.com/icmp.html
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the network ACL
-
-
-## Import
-
-Network ACLs can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_network_acl.main acl-7aaabd18
-```
diff --git a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown b/website/source/docs/providers/aws/r/network_acl_rule.html.markdown
deleted file mode 100644
index fc9805526..000000000
--- a/website/source/docs/providers/aws/r/network_acl_rule.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_network_acl_rule"
-sidebar_current: "docs-aws-resource-network-acl-rule"
-description: |-
- Provides a network ACL Rule resource.
----
-
-# aws\_network\_acl\_rule
-
-Creates an entry (a rule) in a network ACL with the specified rule number.
-
-## Example Usage
-
-```hcl
-resource "aws_network_acl" "bar" {
- vpc_id = "${aws_vpc.foo.id}"
-}
-
-resource "aws_network_acl_rule" "bar" {
- network_acl_id = "${aws_network_acl.bar.id}"
- rule_number = 200
- egress = false
- protocol = "tcp"
- rule_action = "allow"
- cidr_block = "0.0.0.0/0"
- from_port = 22
- to_port = 22
-}
-```
-
-~> **Note:** One of either `cidr_block` or `ipv6_cidr_block` is required.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `network_acl_id` - (Required) The ID of the network ACL.
-* `rule_number` - (Required) The rule number for the entry (for example, 100). ACL entries are processed in ascending order by rule number.
-* `egress` - (Optional, bool) Indicates whether this is an egress rule (rule is applied to traffic leaving the subnet). Default `false`.
-* `protocol` - (Required) The protocol. A value of -1 means all protocols.
-* `rule_action` - (Required) Indicates whether to allow or deny the traffic that matches the rule. Accepted values: `allow` | `deny`
-* `cidr_block` - (Optional) The network range to allow or deny, in CIDR notation (for example 172.16.0.0/24 ).
-* `ipv6_cidr_block` - (Optional) The IPv6 CIDR block to allow or deny.
-* `from_port` - (Optional) The from port to match.
-* `to_port` - (Optional) The to port to match.
-* `icmp_type` - (Optional) ICMP protocol: The ICMP type. Required if specifying ICMP for the protocol. e.g. -1
-* `icmp_code` - (Optional) ICMP protocol: The ICMP code. Required if specifying ICMP for the protocol. e.g. -1
-
-~> **NOTE:** If the value of `protocol` is `-1` or `all`, the `from_port` and `to_port` values will be ignored and the rule will apply to all ports.
-
-~> **NOTE:** If the value of `icmp_type` is `-1` (which results in a wildcard ICMP type), the `icmp_code` must also be set to `-1` (wildcard ICMP code).
-
-~> Note: For more information on ICMP types and codes, see here: http://www.nthelp.com/icmp.html
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the network ACL Rule
diff --git a/website/source/docs/providers/aws/r/network_interface.markdown b/website/source/docs/providers/aws/r/network_interface.markdown
deleted file mode 100644
index 02ac9cdd3..000000000
--- a/website/source/docs/providers/aws/r/network_interface.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_network_interface"
-sidebar_current: "docs-aws-resource-network-interface"
-description: |-
- Provides an Elastic network interface (ENI) resource.
----
-
-# aws\_network\_interface
-
-Provides an Elastic network interface (ENI) resource.
-
-## Example Usage
-
-```hcl
-resource "aws_network_interface" "test" {
- subnet_id = "${aws_subnet.public_a.id}"
- private_ips = ["10.0.0.50"]
- security_groups = ["${aws_security_group.web.id}"]
-
- attachment {
- instance = "${aws_instance.test.id}"
- device_index = 1
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `subnet_id` - (Required) Subnet ID to create the ENI in.
-* `description` - (Optional) A description for the network interface.
-* `private_ips` - (Optional) List of private IPs to assign to the ENI.
-* `private_ips_count` - (Optional) Number of private IPs to assign to the ENI.
-* `security_groups` - (Optional) List of security group IDs to assign to the ENI.
-* `attachment` - (Optional) Block to define the attachment of the ENI. Documented below.
-* `source_dest_check` - (Optional) Whether to enable source destination checking for the ENI. Default true.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `attachment` block supports:
-
-* `instance` - (Required) ID of the instance to attach to.
-* `device_index` - (Required) Integer to define the devices index.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `subnet_id` - Subnet ID the ENI is in.
-* `description` - A description for the network interface.
-* `private_ips` - List of private IPs assigned to the ENI.
-* `security_groups` - List of security groups attached to the ENI.
-* `attachment` - Block defining the attachment of the ENI.
-* `source_dest_check` - Whether source destination checking is enabled
-* `tags` - Tags assigned to the ENI.
-
-
-
-## Import
-
-Network Interfaces can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_network_interface.test eni-e5aa89a3
-```
diff --git a/website/source/docs/providers/aws/r/network_interface_attachment.html.markdown b/website/source/docs/providers/aws/r/network_interface_attachment.html.markdown
deleted file mode 100644
index f02978ab9..000000000
--- a/website/source/docs/providers/aws/r/network_interface_attachment.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_network_interface_attachment"
-sidebar_current: "docs-aws-resource-network-interface-attachment"
-description: |-
- Attaches an Elastic network interface (ENI) resource to an EC2 instance.
----
-
-# aws\_network\_interface\_attachment
-
-Attaches an Elastic network interface (ENI) resource to an EC2 instance.
-
-## Example Usage
-
-```
-resource "aws_network_interface_attachment" "test" {
- instance_id = "${aws_instance.test.id}"
- network_interface_id = "${aws_network_interface.test.id}"
- device_index = 0
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `instance_id` - (Required) Instance ID to attach.
-* `network_interface_id` - (Required) ENI ID to attach.
-* `device_index` - (Required) Network interface index (int).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `instance_id` - Instance ID.
-* `network_interface_id` - Network interface ID.
-* `attachment_id` - The ENI Attachment ID.
-* `status` - The status of the Network Interface Attachment.
diff --git a/website/source/docs/providers/aws/r/opsworks_application.html.markdown b/website/source/docs/providers/aws/r/opsworks_application.html.markdown
deleted file mode 100644
index cbe437d69..000000000
--- a/website/source/docs/providers/aws/r/opsworks_application.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_application"
-sidebar_current: "docs-aws-resource-opsworks-application"
-description: |-
- Provides an OpsWorks application resource.
----
-
-# aws\_opsworks\_application
-
-Provides an OpsWorks application resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_application" "foo-app" {
- name = "foobar application"
- short_name = "foobar"
- stack_id = "${aws_opsworks_stack.stack.id}"
- type = "rails"
- description = "This is a Rails application"
-
- domains = [
- "example.com",
- "sub.example.com",
- ]
-
- environment = {
- key = "key"
- value = "value"
- secure = false
- }
-
- app_source = {
- type = "git"
- revision = "master"
- url = "https://github.com/example.git"
- }
-
- enable_ssl = true
-
- ssl_configuration = {
- private_key = "${file("./foobar.key")}"
- certificate = "${file("./foobar.crt")}"
- }
-
- document_root = "public"
- auto_bundle_on_deploy = true
- rails_env = "staging"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A human-readable name for the application.
-* `short_name` - (Required) A short, machine-readable name for the application. This can only be defined on resource creation and ignored on resource update.
-* `stack_id` - (Required) The id of the stack the application will belong to.
-* `type` - (Required) Opsworks application type. One of `aws-flow-ruby`, `java`, `rails`, `php`, `nodejs`, `static` or `other`.
-* `description` - (Optional) A description of the app.
-* `environment` - (Optional) Object to define environment variables. Object is described below.
-* `enable_ssl` - (Optional) Whether to enable SSL for the app. This must be set in order to let `ssl_configuration.private_key`, `ssl_configuration.certificate` and `ssl_configuration.chain` take effect.
-* `ssl_configuration` - (Optional) The SSL configuration of the app. Object is described below.
-* `app_source` - (Optional) SCM configuration of the app as described below.
-* `data_source_arn` - (Optional) The data source's ARN.
-* `data_source_type` - (Optional) The data source's type one of `AutoSelectOpsworksMysqlInstance`, `OpsworksMysqlInstance`, or `RdsDbInstance`.
-* `data_source_database_name` - (Optional) The database name.
-* `domains` - (Optional) A list of virtual host aliases.
-* `document_root` - (Optional) Subfolder for the document root for application of type `rails`.
-* `auto_bundle_on_deploy` - (Optional) Run bundle install when deploying for application of type `rails`.
-* `rails_env` - (Required if `type` = `rails`) The name of the Rails environment for application of type `rails`.
-* `aws_flow_ruby_settings` - (Optional) Specify activity and workflow workers for your app using the aws-flow gem.
-
-An `app_source` block supports the following arguments (can only be defined once per resource):
-
-* `type` - (Required) The type of source to use. For example, "archive".
-* `url` - (Required) The URL where the app resource can be found.
-* `username` - (Optional) Username to use when authenticating to the source.
-* `password` - (Optional) Password to use when authenticating to the source.
-* `ssh_key` - (Optional) SSH key to use when authenticating to the source.
-* `revision` - (Optional) For sources that are version-aware, the revision to use.
-
-An `environment` block supports the following arguments:
-
-* `key` - (Required) Variable name.
-* `value` - (Required) Variable value.
-* `secure` - (Optional) Set visibility of the variable value to `true` or `false`.
-
-A `ssl_configuration` block supports the following arguments (can only be defined once per resource):
-
-* `private_key` - (Required) The private key; the contents of the certificate's domain.key file.
-* `certificate` - (Required) The contents of the certificate's domain.crt file.
-* `chain` - (Optional) Can be used to specify an intermediate certificate authority key or client authentication.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the application.
diff --git a/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown
deleted file mode 100644
index 81427aed5..000000000
--- a/website/source/docs/providers/aws/r/opsworks_custom_layer.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_custom_layer"
-sidebar_current: "docs-aws-resource-opsworks-custom-layer"
-description: |-
- Provides an OpsWorks custom layer resource.
----
-
-# aws\_opsworks\_custom\_layer
-
-Provides an OpsWorks custom layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_custom_layer" "custlayer" {
- name = "My Awesome Custom Layer"
- short_name = "awesome"
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A human-readable name for the layer.
-* `short_name` - (Required) A short, machine-readable name for the layer, which will be used to identify it in the Chef node JSON.
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
-
-## Import
-
-OpsWorks Custom Layers can be imported using the `id`, e.g.
-
-```
-$ terraform import aws_opsworks_custom_layer.bar 00000000-0000-0000-0000-000000000000
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown
deleted file mode 100644
index a3be106ea..000000000
--- a/website/source/docs/providers/aws/r/opsworks_ganglia_layer.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_ganglia_layer"
-sidebar_current: "docs-aws-resource-opsworks-ganglia-layer"
-description: |-
- Provides an OpsWorks Ganglia layer resource.
----
-
-# aws\_opsworks\_ganglia\_layer
-
-Provides an OpsWorks Ganglia layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_ganglia_layer" "monitor" {
- stack_id = "${aws_opsworks_stack.main.id}"
- password = "foobarbaz"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `password` - (Required) The password to use for Ganglia.
-* `name` - (Optional) A human-readable name for the layer.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `url` - (Optional) The URL path to use for Ganglia. Defaults to "/ganglia".
-* `username` - (Optional) The username to use for Ganglia. Defaults to "opsworks".
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown
deleted file mode 100644
index ca0427b8a..000000000
--- a/website/source/docs/providers/aws/r/opsworks_haproxy_layer.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_haproxy_layer"
-sidebar_current: "docs-aws-resource-opsworks-haproxy-layer"
-description: |-
- Provides an OpsWorks HAProxy layer resource.
----
-
-# aws\_opsworks\_haproxy\_layer
-
-Provides an OpsWorks HAProxy layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_haproxy_layer" "lb" {
- stack_id = "${aws_opsworks_stack.main.id}"
- stats_password = "foobarbaz"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `stats_password` - (Required) The password to use for HAProxy stats.
-* `name` - (Optional) A human-readable name for the layer.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `healthcheck_method` - (Optional) HTTP method to use for instance healthchecks. Defaults to "OPTIONS".
-* `healthcheck_url` - (Optional) URL path to use for instance healthchecks. Defaults to "/".
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `stats_enabled` - (Optional) Whether to enable HAProxy stats.
-* `stats_url` - (Optional) The HAProxy stats URL. Defaults to "/haproxy?stats".
-* `stats_user` - (Optional) The username for HAProxy stats. Defaults to "opsworks".
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_instance.html.markdown b/website/source/docs/providers/aws/r/opsworks_instance.html.markdown
deleted file mode 100644
index fa06aff52..000000000
--- a/website/source/docs/providers/aws/r/opsworks_instance.html.markdown
+++ /dev/null
@@ -1,142 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_instance"
-sidebar_current: "docs-aws-resource-opsworks-instance"
-description: |-
- Provides an OpsWorks instance resource.
----
-
-# aws\_opsworks\_instance
-
-Provides an OpsWorks instance resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_instance" "my-instance" {
- stack_id = "${aws_opsworks_stack.my-stack.id}"
-
- layer_ids = [
- "${aws_opsworks_custom_layer.my-layer.id}",
- ]
-
- instance_type = "t2.micro"
- os = "Amazon Linux 2015.09"
- state = "stopped"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `instance_type` - (Required) The type of instance to start
-* `stack_id` - (Required) The id of the stack the instance will belong to.
-* `layer_ids` - (Required) The ids of the layers the instance will belong to.
-* `state` - (Optional) The desired state of the instance. Can be either `"running"` or `"stopped"`.
-* `install_updates_on_boot` - (Optional) Controls where to install OS and package updates when the instance boots. Defaults to `true`.
-* `auto_scaling_type` - (Optional) Creates load-based or time-based instances. If set, can be either: `"load"` or `"timer"`.
-* `availability_zone` - (Optional) Name of the availability zone where instances will be created
- by default.
-* `ebs_optimized` - (Optional) If true, the launched EC2 instance will be EBS-optimized.
-* `hostname` - (Optional) The instance's host name.
-* `architecture` - (Optional) Machine architecture for created instances. Can be either `"x86_64"` (the default) or `"i386"`
-* `ami_id` - (Optional) The AMI to use for the instance. If an AMI is specified, `os` must be `"Custom"`.
-* `os` - (Optional) Name of operating system that will be installed.
-* `root_device_type` - (Optional) Name of the type of root device instances will have by default. Can be either `"ebs"` or `"instance-store"`
-* `ssh_key_name` - (Optional) Name of the SSH keypair that instances will have by default.
-* `agent_version` - (Optional) The AWS OpsWorks agent to install. Defaults to `"INHERIT"`.
-* `subnet_id` - (Optional) Subnet ID to attach to
-* `tenancy` - (Optional) Instance tenancy to use. Can be one of `"default"`, `"dedicated"` or `"host"`
-* `virtualization_type` - (Optional) Keyword to choose what virtualization mode created instances
- will use. Can be either `"paravirtual"` or `"hvm"`.
-* `root_block_device` - (Optional) Customize details about the root block
- device of the instance. See [Block Devices](#block-devices) below for details.
-* `ebs_block_device` - (Optional) Additional EBS block devices to attach to the
- instance. See [Block Devices](#block-devices) below for details.
-* `ephemeral_block_device` - (Optional) Customize Ephemeral (also known as
- "Instance Store") volumes on the instance. See [Block Devices](#block-devices) below for details.
-
-
-## Block devices
-
-Each of the `*_block_device` attributes controls a portion of the AWS
-Instance's "Block Device Mapping". It's a good idea to familiarize yourself with [AWS's Block Device
-Mapping docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
-to understand the implications of using these attributes.
-
-The `root_block_device` mapping supports the following:
-
-* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`,
- or `"io1"`. (Default: `"standard"`).
-* `volume_size` - (Optional) The size of the volume in gigabytes.
-* `iops` - (Optional) The amount of provisioned
- [IOPS](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
- This must be set with a `volume_type` of `"io1"`.
-* `delete_on_termination` - (Optional) Whether the volume should be destroyed
- on instance termination (Default: `true`).
-
-Modifying any of the `root_block_device` settings requires resource
-replacement.
-
-Each `ebs_block_device` supports the following:
-
-* `device_name` - The name of the device to mount.
-* `snapshot_id` - (Optional) The Snapshot ID to mount.
-* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`,
- or `"io1"`. (Default: `"standard"`).
-* `volume_size` - (Optional) The size of the volume in gigabytes.
-* `iops` - (Optional) The amount of provisioned
- [IOPS](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
- This must be set with a `volume_type` of `"io1"`.
-* `delete_on_termination` - (Optional) Whether the volume should be destroyed
- on instance termination (Default: `true`).
-
-Modifying any `ebs_block_device` currently requires resource replacement.
-
-Each `ephemeral_block_device` supports the following:
-
-* `device_name` - The name of the block device to mount on the instance.
-* `virtual_name` - The [Instance Store Device
- Name](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames)
- (e.g. `"ephemeral0"`)
-
-Each AWS Instance type has a different set of Instance Store block devices
-available for attachment. AWS [publishes a
-list](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#StorageOnInstanceTypes)
-of which ephemeral devices are available on each type. The devices are always
-identified by the `virtual_name` in the format `"ephemeral{0..N}"`.
-
-~> **NOTE:** Currently, changes to `*_block_device` configuration of _existing_
-resources cannot be automatically detected by Terraform. After making updates
-to block device configuration, resource recreation can be manually triggered by
-using the [`taint` command](/docs/commands/taint.html).
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the OpsWorks instance.
-* `agent_version` - The AWS OpsWorks agent version.
-* `availability_zone` - The availability zone of the instance.
-* `ssh_key_name` - The key name of the instance
-* `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this
- is only available if you've enabled DNS hostnames for your VPC
-* `public_ip` - The public IP address assigned to the instance, if applicable.
-* `private_dns` - The private DNS name assigned to the instance. Can only be
- used inside the Amazon EC2, and only available if you've enabled DNS hostnames
- for your VPC
-* `private_ip` - The private IP address assigned to the instance
-* `subnet_id` - The VPC subnet ID.
-* `tenancy` - The Instance tenancy
-* `security_group_ids` - The associated security groups.
-
-## Import
-
-Opsworks Instances can be imported using the `instance id`, e.g.
-
-```
-$ terraform import aws_opsworks_instance.my_instance 4d6d1710-ded9-42a1-b08e-b043ad7af1e2
-```
-
diff --git a/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown
deleted file mode 100644
index 25b680175..000000000
--- a/website/source/docs/providers/aws/r/opsworks_java_app_layer.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_java_app_layer"
-sidebar_current: "docs-aws-resource-opsworks-java-app-layer"
-description: |-
- Provides an OpsWorks Java application layer resource.
----
-
-# aws\_opsworks\_java\_app\_layer
-
-Provides an OpsWorks Java application layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_java_app_layer" "app" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `app_server` - (Optional) Keyword for the application container to use. Defaults to "tomcat".
-* `app_server_version` - (Optional) Version of the selected application container to use. Defaults to "7".
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `jvm_type` - (Optional) Keyword for the type of JVM to use. Defaults to `openjdk`.
-* `jvm_options` - (Optional) Options to set for the JVM.
-* `jvm_version` - (Optional) Version of JVM to use. Defaults to "7".
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown
deleted file mode 100644
index b89172fb5..000000000
--- a/website/source/docs/providers/aws/r/opsworks_memcached_layer.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_memcached_layer"
-sidebar_current: "docs-aws-resource-opsworks-memcached-layer"
-description: |-
- Provides an OpsWorks memcached layer resource.
----
-
-# aws\_opsworks\_memcached\_layer
-
-Provides an OpsWorks memcached layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_memcached_layer" "cache" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `allocated_memory` - (Optional) Amount of memory to allocate for the cache on each instance, in megabytes. Defaults to 512MB.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown
deleted file mode 100644
index df9bad717..000000000
--- a/website/source/docs/providers/aws/r/opsworks_mysql_layer.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_mysql_layer"
-sidebar_current: "docs-aws-resource-opsworks-mysql-layer"
-description: |-
- Provides an OpsWorks MySQL layer resource.
----
-
-# aws\_opsworks\_mysql\_layer
-
-Provides an OpsWorks MySQL layer resource.
-
-~> **Note:** All arguments including the root password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_mysql_layer" "db" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `root_password` - (Optional) Root password to use for MySQL.
-* `root_password_on_all_instances` - (Optional) Whether to set the root user password to all instances in the stack so they can access the instances in this layer.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown
deleted file mode 100644
index f5e52ac78..000000000
--- a/website/source/docs/providers/aws/r/opsworks_nodejs_app_layer.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_nodejs_app_layer"
-sidebar_current: "docs-aws-resource-opsworks-nodejs-app-layer"
-description: |-
- Provides an OpsWorks NodeJS application layer resource.
----
-
-# aws\_opsworks\_nodejs\_app\_layer
-
-Provides an OpsWorks NodeJS application layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_nodejs_app_layer" "app" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `nodejs_version` - (Optional) The version of NodeJS to use. Defaults to "0.10.38".
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_permission.html.markdown b/website/source/docs/providers/aws/r/opsworks_permission.html.markdown
deleted file mode 100644
index 408e2538f..000000000
--- a/website/source/docs/providers/aws/r/opsworks_permission.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_permission"
-sidebar_current: "docs-aws-resource-opsworks-permission"
-description: |-
- Provides an OpsWorks permission resource.
----
-
-# aws\_opsworks\_permission
-
-Provides an OpsWorks permission resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_permission" "my_stack_permission" {
- allow_ssh = true
- allow_sudo = true
- level = "iam_only"
- user_arn = "${aws_iam_user.user.arn}"
- stack_id = "${aws_opsworks_stack.stack.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `allow_ssh` - (Optional) Whether the user is allowed to use SSH to communicate with the instance
-* `allow_sudo` - (Optional) Whether the user is allowed to use sudo to elevate privileges
-* `user_arn` - (Required) The user's IAM ARN to set permissions for
-* `level` - (Optional) The user's permission level. Must be one of `deny`, `show`, `deploy`, `manage`, `iam_only`
-* `stack_id` - (Required) The stack to set the permissions for
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The computed id of the permission. Please note that this is only used internally to identify the permission. This value is not used in aws.
diff --git a/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown
deleted file mode 100644
index 963684f87..000000000
--- a/website/source/docs/providers/aws/r/opsworks_php_app_layer.html.markdown
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_php_app_layer"
-sidebar_current: "docs-aws-resource-opsworks-php-app-layer"
-description: |-
- Provides an OpsWorks PHP application layer resource.
----
-
-# aws\_opsworks\_php\_app\_layer
-
-Provides an OpsWorks PHP application layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_php_app_layer" "app" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown b/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown
deleted file mode 100644
index 84cd69d6c..000000000
--- a/website/source/docs/providers/aws/r/opsworks_rails_app_layer.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_rails_app_layer"
-sidebar_current: "docs-aws-resource-opsworks-rails-app-layer"
-description: |-
- Provides an OpsWorks Ruby on Rails application layer resource.
----
-
-# aws\_opsworks\_rails\_app\_layer
-
-Provides an OpsWorks Ruby on Rails application layer resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_rails_app_layer" "app" {
- stack_id = "${aws_opsworks_stack.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The id of the stack the layer will belong to.
-* `name` - (Optional) A human-readable name for the layer.
-* `app_server` - (Optional) Keyword for the app server to use. Defaults to "apache_passenger".
-* `auto_assign_elastic_ips` - (Optional) Whether to automatically assign an elastic IP address to the layer's instances.
-* `auto_assign_public_ips` - (Optional) For stacks belonging to a VPC, whether to automatically assign a public IP address to each of the layer's instances.
-* `bundler_version` - (Optional) When OpsWorks is managing Bundler, which version to use. Defaults to "1.5.3".
-* `custom_instance_profile_arn` - (Optional) The ARN of an IAM profile that will be used for the layer's instances.
-* `custom_security_group_ids` - (Optional) Ids for a set of security groups to apply to the layer's instances.
-* `auto_healing` - (Optional) Whether to enable auto-healing for the layer.
-* `install_updates_on_boot` - (Optional) Whether to install OS and package updates on each instance when it boots.
-* `instance_shutdown_timeout` - (Optional) The time, in seconds, that OpsWorks will wait for Chef to complete after triggering the Shutdown event.
-* `elastic_load_balancer` - (Optional) Name of an Elastic Load Balancer to attach to this layer
-* `drain_elb_on_shutdown` - (Optional) Whether to enable Elastic Load Balancing connection draining.
-* `manage_bundler` - (Optional) Whether OpsWorks should manage bundler. On by default.
-* `passenger_version` - (Optional) The version of Passenger to use. Defaults to "4.0.46".
-* `ruby_version` - (Optional) The version of Ruby to use. Defaults to "2.0.0".
-* `rubygems_version` - (Optional) The version of RubyGems to use. Defaults to "2.2.2".
-* `system_packages` - (Optional) Names of a set of system packages to install on the layer's instances.
-* `use_ebs_optimized_instances` - (Optional) Whether to use EBS-optimized instances.
-* `ebs_volume` - (Optional) `ebs_volume` blocks, as described below, will each create an EBS volume and connect it to the layer's instances.
-* `custom_json` - (Optional) Custom JSON attributes to apply to the layer.
-
-The following extra optional arguments, all lists of Chef recipe names, allow
-custom Chef recipes to be applied to layer instances at the five different
-lifecycle events, if custom cookbooks are enabled on the layer's stack:
-
-* `custom_configure_recipes`
-* `custom_deploy_recipes`
-* `custom_setup_recipes`
-* `custom_shutdown_recipes`
-* `custom_undeploy_recipes`
-
-An `ebs_volume` block supports the following arguments:
-
-* `mount_point` - (Required) The path to mount the EBS volume on the layer's instances.
-* `size` - (Required) The size of the volume in gigabytes.
-* `number_of_disks` - (Required) The number of disks to use for the EBS volume.
-* `raid_level` - (Required) The RAID level to use for the volume.
-* `type` - (Optional) The type of volume to create. This may be `standard` (the default), `io1` or `gp2`.
-* `iops` - (Optional) For PIOPS volumes, the IOPS per disk.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the layer.
diff --git a/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown b/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown
deleted file mode 100644
index 496d253c0..000000000
--- a/website/source/docs/providers/aws/r/opsworks_rds_db_instance.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_rds_db_instance"
-sidebar_current: "docs-aws-resource-opsworks-rds-db-instance"
-description: |-
- Provides an OpsWorks RDS DB Instance resource.
----
-
-# aws\_opsworks\_rds\_db\_instance
-
-Provides an OpsWorks RDS DB Instance resource.
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_rds_db_instance" "my_instance" {
- stack_id = "${aws_opsworks_stack.my_stack.id}"
- rds_db_instance_arn = "${aws_db_instance.my_instance.arn}"
- db_user = "someUser"
- db_password = "somePass"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `stack_id` - (Required) The stack to register a db instance for. Changing this will force a new resource.
-* `rds_db_instance_arn` - (Required) The db instance to register for this stack. Changing this will force a new resource.
-* `db_user` - (Required) A db username
-* `db_password` - (Required) A db password
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The computed id. Please note that this is only used internally to identify the stack <-> instance relation. This value is not used in aws.
diff --git a/website/source/docs/providers/aws/r/opsworks_stack.html.markdown b/website/source/docs/providers/aws/r/opsworks_stack.html.markdown
deleted file mode 100644
index f9cceee7f..000000000
--- a/website/source/docs/providers/aws/r/opsworks_stack.html.markdown
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_opsworks_stack"
-sidebar_current: "docs-aws-resource-opsworks-stack"
-description: |-
- Provides an OpsWorks stack resource.
----
-
-# aws\_opsworks\_stack
-
-Provides an OpsWorks stack resource.
-
-## Example Usage
-
-```hcl
-resource "aws_opsworks_stack" "main" {
- name = "awesome-stack"
- region = "us-west-1"
- service_role_arn = "${aws_iam_role.opsworks.arn}"
- default_instance_profile_arn = "${aws_iam_instance_profile.opsworks.arn}"
-
-  custom_json = <<EOT
-~> **Note:** using `apply_immediately` can result in a
-brief downtime as the server reboots. See the AWS Docs on [RDS Maintenance][4]
-for more information.
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_rds_cluster" "default" {
- cluster_identifier = "aurora-cluster-demo"
- availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
- database_name = "mydb"
- master_username = "foo"
- master_password = "bar"
- backup_retention_period = 5
- preferred_backup_window = "07:00-09:00"
-}
-```
-
-~> **NOTE:** RDS Clusters resources that are created without any matching
-RDS Cluster Instances do not currently display in the AWS Console.
-
-## Argument Reference
-
-For more detailed documentation about each argument, refer to
-the [AWS official documentation](https://docs.aws.amazon.com/AmazonRDS/latest/CommandLineReference/CLIReference-cmd-ModifyDBInstance.html).
-
-The following arguments are supported:
-
-* `cluster_identifier` - (Optional, Forces new resources) The cluster identifier. If omitted, Terraform will assign a random, unique identifier.
-* `cluster_identifier_prefix` - (Optional, Forces new resource) Creates a unique cluster identifier beginning with the specified prefix. Conflicts with `cluster_identifier`.
-* `database_name` - (Optional) The name for your database of up to 8 alpha-numeric
- characters. If you do not provide a name, Amazon RDS will not create a
- database in the DB cluster you are creating
-* `master_password` - (Required unless a `snapshot_identifier` is provided) Password for the master DB user. Note that this may
- show up in logs, and it will be stored in the state file
-* `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user
-* `final_snapshot_identifier` - (Optional) The name of your final DB snapshot
- when this DB cluster is deleted. If omitted, no final snapshot will be
- made.
-* `skip_final_snapshot` - (Optional) Determines whether a final DB snapshot is created before the DB cluster is deleted. If true is specified, no DB snapshot is created. If false is specified, a DB snapshot is created before the DB cluster is deleted, using the value from `final_snapshot_identifier`. Default is `false`.
-* `availability_zones` - (Optional) A list of EC2 Availability Zones that
- instances in the DB cluster can be created in
-* `backup_retention_period` - (Optional) The days to retain backups for. Default
-1
-* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled using the BackupRetentionPeriod parameter. Time in UTC
-Default: A 30-minute window selected at random from an 8-hour block of time per region. e.g. 04:00-09:00
-* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in (UTC) e.g. wed:04:00-wed:04:30
-* `port` - (Optional) The port on which the DB accepts connections
-* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate
- with the Cluster
-* `snapshot_identifier` - (Optional) Specifies whether or not to create this cluster from a snapshot. This correlates to the snapshot ID you'd find in the RDS console, e.g: rds:production-2015-06-26-06-05.
-* `storage_encrypted` - (Optional) Specifies whether the DB cluster is encrypted. The default is `false` if not specified.
-* `apply_immediately` - (Optional) Specifies whether any cluster modifications
- are applied immediately, or during the next maintenance window. Default is
- `false`. See [Amazon RDS Documentation for more information.](https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.DBInstance.Modifying.html)
-* `db_subnet_group_name` - (Optional) A DB subnet group to associate with this DB instance. **NOTE:** This must match the `db_subnet_group_name` specified on every [`aws_rds_cluster_instance`](/docs/providers/aws/r/rds_cluster_instance.html) in the cluster.
-* `db_cluster_parameter_group_name` - (Optional) A cluster parameter group to associate with the cluster.
-* `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `storage_encrypted` needs to be set to true.
-* `iam_database_authentication_enabled` - (Optional) Specifies whether or mappings of AWS Identity and Access Management (IAM) accounts to database accounts is enabled.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The RDS Cluster Identifier
-* `cluster_identifier` - The RDS Cluster Identifier
-* `cluster_resource_id` - The RDS Cluster Resource ID
-* `cluster_members` – List of RDS Instances that are a part of this cluster
-* `allocated_storage` - The amount of allocated storage
-* `availability_zones` - The availability zone of the instance
-* `backup_retention_period` - The backup retention period
-* `preferred_backup_window` - The backup window
-* `preferred_maintenance_window` - The maintenance window
-* `endpoint` - The DNS address of the RDS instance
-* `reader_endpoint` - A read-only endpoint for the Aurora cluster, automatically
-load-balanced across replicas
-* `engine` - The database engine
-* `engine_version` - The database engine version
-* `maintenance_window` - The instance maintenance window
-* `database_name` - The database name
-* `port` - The database port
-* `status` - The RDS instance status
-* `master_username` - The master username for the database
-* `storage_encrypted` - Specifies whether the DB cluster is encrypted
-* `preferred_backup_window` - The daily time range during which the backups happen
-* `replication_source_identifier` - ARN of the source DB cluster if this DB cluster is created as a Read Replica.
-
-[1]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html
-[2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html
-[3]: /docs/providers/aws/r/rds_cluster_instance.html
-[4]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_UpgradeDBInstance.Maintenance.html
-
-## Timeouts
-
-`aws_rds_cluster` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `create` - (Default `120 minutes`) Used for Cluster creation
-- `update` - (Default `120 minutes`) Used for Cluster modifications
-- `delete` - (Default `120 minutes`) Used for destroying cluster. This includes
-any cleanup task during the destroying process.
-
-## Import
-
-RDS Clusters can be imported using the `cluster_identifier`, e.g.
-
-```
-$ terraform import aws_rds_cluster.aurora_cluster aurora-prod-cluster
-```
diff --git a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown b/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
deleted file mode 100644
index 4ea2edd49..000000000
--- a/website/source/docs/providers/aws/r/rds_cluster_instance.html.markdown
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_rds_cluster_instance"
-sidebar_current: "docs-aws-resource-rds-cluster-instance"
-description: |-
- Provides an RDS Cluster Resource Instance
----
-
-# aws\_rds\_cluster\_instance
-
-Provides an RDS Cluster Resource Instance. A Cluster Instance Resource defines
-attributes that are specific to a single instance in a [RDS Cluster][3],
-specifically running Amazon Aurora.
-
-Unlike other RDS resources that support replication, with Amazon Aurora you do
-not designate a primary and subsequent replicas. Instead, you simply add RDS
-Instances and Aurora manages the replication. You can use the [count][5]
-meta-parameter to make multiple instances and join them all to the same RDS
-Cluster, or you may specify different Cluster Instance resources with various
-`instance_class` sizes.
-
-For more information on Amazon Aurora, see [Aurora on Amazon RDS][2] in the Amazon RDS User Guide.
-
-## Example Usage
-
-```hcl
-resource "aws_rds_cluster_instance" "cluster_instances" {
- count = 2
- identifier = "aurora-cluster-demo-${count.index}"
- cluster_identifier = "${aws_rds_cluster.default.id}"
- instance_class = "db.r3.large"
-}
-
-resource "aws_rds_cluster" "default" {
- cluster_identifier = "aurora-cluster-demo"
- availability_zones = ["us-west-2a", "us-west-2b", "us-west-2c"]
- database_name = "mydb"
- master_username = "foo"
- master_password = "barbut8chars"
-}
-```
-
-## Argument Reference
-
-For more detailed documentation about each argument, refer to
-the [AWS official documentation](https://docs.aws.amazon.com/AmazonRDS/latest/CommandLineReference/CLIReference-cmd-ModifyDBInstance.html).
-
-The following arguments are supported:
-
-* `identifier` - (Optional, Forces new resource) The identifier for the RDS instance, if omitted, Terraform will assign a random, unique identifier.
-* `identifier_prefix` - (Optional, Forces new resource) Creates a unique identifier beginning with the specified prefix. Conflicts with `identifier`.
-* `cluster_identifier` - (Required) The identifier of the [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html) in which to launch this instance.
-* `instance_class` - (Required) The instance class to use. For details on CPU
-and memory, see [Scaling Aurora DB Instances][4]. Aurora currently
- supports the below instance classes.
- - db.t2.small
- - db.t2.medium
- - db.r3.large
- - db.r3.xlarge
- - db.r3.2xlarge
- - db.r3.4xlarge
- - db.r3.8xlarge
-* `publicly_accessible` - (Optional) Bool to control if instance is publicly accessible.
-Default `false`. See the documentation on [Creating DB Instances][6] for more
-details on controlling this property.
-* `db_subnet_group_name` - (Required if `publicly_accessible = false`, Optional otherwise) A DB subnet group to associate with this DB instance. **NOTE:** This must match the `db_subnet_group_name` of the attached [`aws_rds_cluster`](/docs/providers/aws/r/rds_cluster.html).
-* `db_parameter_group_name` - (Optional) The name of the DB parameter group to associate with this instance.
-* `apply_immediately` - (Optional) Specifies whether any database modifications
- are applied immediately, or during the next maintenance window. Default is `false`.
-* `monitoring_role_arn` - (Optional) The ARN for the IAM role that permits RDS to send
-enhanced monitoring metrics to CloudWatch Logs. You can find more information on the [AWS Documentation](http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html)
-what IAM permissions are needed to allow Enhanced Monitoring for RDS Instances.
-* `monitoring_interval` - (Optional) The interval, in seconds, between points when Enhanced Monitoring metrics are collected for the DB instance. To disable collecting Enhanced Monitoring metrics, specify 0. The default is 0. Valid Values: 0, 1, 5, 10, 15, 30, 60.
-* `promotion_tier` - (Optional) Default 0. Failover Priority setting on instance level. The reader who has lower tier has higher priority to get promoted to writer.
-* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created if automated backups are enabled.
- Eg: "04:00-09:00"
-* `preferred_maintenance_window` - (Optional) The window to perform maintenance in.
- Syntax: "ddd:hh24:mi-ddd:hh24:mi". Eg: "Mon:00:00-Mon:03:00".
-* `auto_minor_version_upgrade` - (Optional) Indicates that minor engine upgrades will be applied automatically to the DB instance during the maintenance window. Default `true`.
-* `tags` - (Optional) A mapping of tags to assign to the instance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cluster_identifier` - The RDS Cluster Identifier
-* `identifier` - The Instance identifier
-* `id` - The Instance identifier
-* `writer` – Boolean indicating if this instance is writable. `False` indicates
-this instance is a read replica
-* `allocated_storage` - The amount of allocated storage
-* `availability_zones` - The availability zone of the instance
-* `endpoint` - The DNS address for this instance. May not be writable
-* `engine` - The database engine
-* `engine_version` - The database engine version
-* `database_name` - The database name
-* `port` - The database port
-* `status` - The RDS instance status
-* `storage_encrypted` - Specifies whether the DB cluster is encrypted.
-* `kms_key_id` - The ARN for the KMS encryption key if one is set to the cluster.
-
-[2]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/CHAP_Aurora.html
-[3]: /docs/providers/aws/r/rds_cluster.html
-[4]: https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Aurora.Managing.html
-[5]: /docs/configuration/resources.html#count
-[6]: https://docs.aws.amazon.com/AmazonRDS/latest/APIReference/API_CreateDBInstance.html
-
-## Timeouts
-
-`aws_rds_cluster_instance` provides the following
-[Timeouts](/docs/configuration/resources.html#timeouts) configuration options:
-
-- `create` - (Default `90 minutes`) Used for Creating Instances, Replicas, and
-restoring from Snapshots
-- `update` - (Default `90 minutes`) Used for Database modifications
-- `delete` - (Default `90 minutes`) Used for destroying databases. This includes
-the time required to take snapshots
-
-## Import
-
-RDS Cluster Instances can be imported using the `identifier`, e.g.
-
-```
-$ terraform import aws_rds_cluster_instance.prod_instance_1 aurora-cluster-instance-1
-```
diff --git a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown b/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown
deleted file mode 100644
index b52a4dd08..000000000
--- a/website/source/docs/providers/aws/r/rds_cluster_parameter_group.markdown
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_rds_cluster_parameter_group"
-sidebar_current: "docs-aws-resource-rds-cluster-parameter-group"
----
-
-# aws\_rds\_cluster\_parameter\_group
-
-Provides an RDS DB cluster parameter group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_rds_cluster_parameter_group" "default" {
- name = "rds-cluster-pg"
- family = "aurora5.6"
- description = "RDS default cluster parameter group"
-
- parameter {
- name = "character_set_server"
- value = "utf8"
- }
-
- parameter {
- name = "character_set_client"
- value = "utf8"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the DB cluster parameter group. If omitted, Terraform will assign a random, unique name.
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified prefix. Conflicts with `name`.
-* `family` - (Required) The family of the DB cluster parameter group.
-* `description` - (Optional) The description of the DB cluster parameter group. Defaults to "Managed by Terraform".
-* `parameter` - (Optional) A list of DB parameters to apply.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Parameter blocks support the following:
-
-* `name` - (Required) The name of the DB parameter.
-* `value` - (Required) The value of the DB parameter.
-* `apply_method` - (Optional) "immediate" (default), or "pending-reboot". Some
- engines can't apply some parameters without a reboot, and you will need to
- specify "pending-reboot" here.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The db cluster parameter group name.
-* `arn` - The ARN of the db cluster parameter group.
-
-
-## Import
-
-RDS Cluster Parameter Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_rds_cluster_parameter_group.cluster_pg production-pg-1
-```
diff --git a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown b/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
deleted file mode 100644
index bcee8cdba..000000000
--- a/website/source/docs/providers/aws/r/redshift_cluster.html.markdown
+++ /dev/null
@@ -1,106 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_redshift_cluster"
-sidebar_current: "docs-aws-resource-redshift-cluster"
----
-
-# aws\_redshift\_cluster
-
-Provides a Redshift Cluster Resource.
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "aws_redshift_cluster" "default" {
- cluster_identifier = "tf-redshift-cluster"
- database_name = "mydb"
- master_username = "foo"
- master_password = "Mustbe8characters"
- node_type = "dc1.large"
- cluster_type = "single-node"
-}
-```
-
-## Argument Reference
-
-For more detailed documentation about each argument, refer to
-the [AWS official documentation](http://docs.aws.amazon.com/cli/latest/reference/redshift/index.html#cli-aws-redshift).
-
-The following arguments are supported:
-
-* `cluster_identifier` - (Required) The Cluster Identifier. Must be a lower case
-string.
-* `database_name` - (Optional) The name of the first database to be created when the cluster is created.
- If you do not provide a name, Amazon Redshift will create a default database called `dev`.
-* `node_type` - (Required) The node type to be provisioned for the cluster.
-* `cluster_type` - (Optional) The cluster type to use. Either `single-node` or `multi-node`.
-* `master_password` - (Required unless a `snapshot_identifier` is provided) Password for the master DB user.
- Note that this may show up in logs, and it will be stored in the state file. Password must contain at least 8 chars and
- contain at least one uppercase letter, one lowercase letter, and one number.
-* `master_username` - (Required unless a `snapshot_identifier` is provided) Username for the master DB user.
-
-* `cluster_security_groups` - (Optional) A list of security groups to be associated with this cluster.
-* `vpc_security_group_ids` - (Optional) A list of Virtual Private Cloud (VPC) security groups to be associated with the cluster.
-* `cluster_subnet_group_name` - (Optional) The name of a cluster subnet group to be associated with this cluster. If this parameter is not provided the resulting cluster will be deployed outside virtual private cloud (VPC).
-* `availability_zone` - (Optional) The EC2 Availability Zone (AZ) in which you want Amazon Redshift to provision the cluster. For example, if you have several EC2 instances running in a specific Availability Zone, then you might want the cluster to be provisioned in the same zone in order to decrease network latency.
-* `preferred_maintenance_window` - (Optional) The weekly time range (in UTC) during which automated cluster maintenance can occur.
- Format: ddd:hh24:mi-ddd:hh24:mi
-* `cluster_parameter_group_name` - (Optional) The name of the parameter group to be associated with this cluster.
-* `automated_snapshot_retention_period` - (Optional) The number of days that automated snapshots are retained. If the value is 0, automated snapshots are disabled. Even if automated snapshots are disabled, you can still create manual snapshots when you want with create-cluster-snapshot. Default is 1.
-* `port` - (Optional) The port number on which the cluster accepts incoming connections.
- The cluster is accessible only via the JDBC and ODBC connection strings. Part of the connection string requires the port on which the cluster will listen for incoming connections. Default port is 5439.
-* `cluster_version` - (Optional) The version of the Amazon Redshift engine software that you want to deploy on the cluster.
- The version selected runs on all the nodes in the cluster.
-* `allow_version_upgrade` - (Optional) If true , major version upgrades can be applied during the maintenance window to the Amazon Redshift engine that is running on the cluster. Default is true
-* `number_of_nodes` - (Optional) The number of compute nodes in the cluster. This parameter is required when the ClusterType parameter is specified as multi-node. Default is 1.
-* `publicly_accessible` - (Optional) If true, the cluster can be accessed from a public network. Default is `true`.
-* `encrypted` - (Optional) If true , the data in the cluster is encrypted at rest.
-* `enhanced_vpc_routing` - (Optional) If true , enhanced VPC routing is enabled.
-* `kms_key_id` - (Optional) The ARN for the KMS encryption key. When specifying `kms_key_id`, `encrypted` needs to be set to true.
-* `elastic_ip` - (Optional) The Elastic IP (EIP) address for the cluster.
-* `skip_final_snapshot` - (Optional) Determines whether a final snapshot of the cluster is created before Amazon Redshift deletes the cluster. If true , a final cluster snapshot is not created. If false , a final cluster snapshot is created before the cluster is deleted. Default is true.
-* `final_snapshot_identifier` - (Optional) The identifier of the final snapshot that is to be created immediately before deleting the cluster. If this parameter is provided, `skip_final_snapshot` must be false.
-* `snapshot_identifier` - (Optional) The name of the snapshot from which to create the new cluster.
-* `snapshot_cluster_identifier` - (Optional) The name of the cluster the source snapshot was created from.
-* `owner_account` - (Optional) The AWS customer account used to create or copy the snapshot. Required if you are restoring a snapshot you do not own, optional if you own the snapshot.
-* `iam_roles` - (Optional) A list of IAM Role ARNs to associate with the cluster. A Maximum of 10 can be associated to the cluster at any time.
-* `enable_logging` - (Optional) Enables logging information such as queries and connection attempts, for the specified Amazon Redshift cluster. Defaults to `false`.
-* `bucket_name` - (Optional, required when `enable_logging` is `true`) The name of an existing S3 bucket where the log files are to be stored. Must be in the same region as the cluster and the cluster must have read bucket and put object permissions.
-For more information on the permissions required for the bucket, please read the AWS [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/db-auditing.html#db-auditing-enable-logging)
-* `s3_key_prefix` - (Optional) The prefix applied to the log file names.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Redshift Cluster ID.
-* `cluster_identifier` - The Cluster Identifier
-* `cluster_type` - The cluster type
-* `node_type` - The type of nodes in the cluster
-* `database_name` - The name of the default database in the Cluster
-* `availability_zone` - The availability zone of the Cluster
-* `automated_snapshot_retention_period` - The backup retention period
-* `preferred_maintenance_window` - The backup window
-* `endpoint` - The connection endpoint
-* `encrypted` - Whether the data in the cluster is encrypted
-* `cluster_security_groups` - The security groups associated with the cluster
-* `vpc_security_group_ids` - The VPC security group Ids associated with the cluster
-* `port` - The Port the cluster responds on
-* `cluster_version` - The version of Redshift engine software
-* `cluster_parameter_group_name` - The name of the parameter group to be associated with this cluster
-* `cluster_subnet_group_name` - The name of a cluster subnet group to be associated with this cluster
-* `cluster_public_key` - The public key for the cluster
-* `cluster_revision_number` - The specific revision number of the database in the cluster
-
-## Import
-
-Redshift Clusters can be imported using the `cluster_identifier`, e.g.
-
-```
-$ terraform import aws_redshift_cluster.myprodcluster tf-redshift-cluster-12345
-```
diff --git a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown b/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
deleted file mode 100644
index 65472b540..000000000
--- a/website/source/docs/providers/aws/r/redshift_parameter_group.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_redshift_parameter_group"
-sidebar_current: "docs-aws-resource-redshift-parameter-group"
----
-
-# aws\_redshift\_parameter\_group
-
-Provides a Redshift Cluster parameter group resource.
-
-## Example Usage
-
-```hcl
-resource "aws_redshift_parameter_group" "bar" {
- name = "parameter-group-test-terraform"
- family = "redshift-1.0"
-
- parameter {
- name = "require_ssl"
- value = "true"
- }
-
- parameter {
- name = "query_group"
- value = "example"
- }
-
- parameter {
- name = "enable_user_activity_logging"
- value = "true"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Redshift parameter group.
-* `family` - (Required) The family of the Redshift parameter group.
-* `description` - (Optional) The description of the Redshift parameter group. Defaults to "Managed by Terraform".
-* `parameter` - (Optional) A list of Redshift parameters to apply.
-
-Parameter blocks support the following:
-
-* `name` - (Required) The name of the Redshift parameter.
-* `value` - (Required) The value of the Redshift parameter.
-
-You can read more about the parameters that Redshift supports in the [documentation](http://docs.aws.amazon.com/redshift/latest/mgmt/working-with-parameter-groups.html)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Redshift parameter group name.
-
-## Import
-
-Redshift Parameter Groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_redshift_parameter_group.paramgroup1 parameter-group-test-terraform
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown b/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
deleted file mode 100644
index 4d6dc0367..000000000
--- a/website/source/docs/providers/aws/r/redshift_security_group.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_redshift_security_group"
-sidebar_current: "docs-aws-resource-redshift-security-group"
-description: |-
- Provides a Redshift security group resource.
----
-
-# aws\_redshift\_security\_group
-
-Creates a new Amazon Redshift security group. You use security groups to control access to non-VPC clusters
-
-## Example Usage
-
-```hcl
-resource "aws_redshift_security_group" "default" {
- name = "redshift-sg"
-
- ingress {
- cidr = "10.0.0.0/24"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Redshift security group.
-* `description` - (Optional) The description of the Redshift security group. Defaults to "Managed by Terraform".
-* `ingress` - (Optional) A list of ingress rules.
-
-Ingress blocks support the following:
-
-* `cidr` - The CIDR block to accept
-* `security_group_name` - The name of the security group to authorize
-* `security_group_owner_id` - The owner Id of the security group provided
- by `security_group_name`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Redshift security group ID.
-
-## Import
-
-Redshift security groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_redshift_security_group.testgroup1 redshift_test_group
-```
diff --git a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown b/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown
deleted file mode 100644
index b5deadfd9..000000000
--- a/website/source/docs/providers/aws/r/redshift_subnet_group.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_redshift_subnet_group"
-sidebar_current: "docs-aws-resource-redshift-subnet-group"
-description: |-
- Provides a Redshift Subnet Group resource.
----
-
-# aws\_redshift\_subnet\_group
-
-Creates a new Amazon Redshift subnet group. You must provide a list of one or more subnets in your existing Amazon Virtual Private Cloud (Amazon VPC) when creating an Amazon Redshift subnet group.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "foo" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_subnet" "foo" {
- cidr_block = "10.1.1.0/24"
- availability_zone = "us-west-2a"
- vpc_id = "${aws_vpc.foo.id}"
-
- tags {
- Name = "tf-dbsubnet-test-1"
- }
-}
-
-resource "aws_subnet" "bar" {
- cidr_block = "10.1.2.0/24"
- availability_zone = "us-west-2b"
- vpc_id = "${aws_vpc.foo.id}"
-
- tags {
- Name = "tf-dbsubnet-test-2"
- }
-}
-
-resource "aws_redshift_subnet_group" "foo" {
- name = "foo"
- subnet_ids = ["${aws_subnet.foo.id}", "${aws_subnet.bar.id}"]
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Redshift Subnet group.
-* `description` - (Optional) The description of the Redshift Subnet group. Defaults to "Managed by Terraform".
-* `subnet_ids` - (Required) An array of VPC subnet IDs.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Redshift Subnet group ID.
-
-## Import
-
-Redshift subnet groups can be imported using the `name`, e.g.
-
-```
-$ terraform import aws_redshift_subnet_group.testgroup1 test-cluster-subnet-group
-```
diff --git a/website/source/docs/providers/aws/r/route.html.markdown b/website/source/docs/providers/aws/r/route.html.markdown
deleted file mode 100644
index dafe52651..000000000
--- a/website/source/docs/providers/aws/r/route.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route"
-sidebar_current: "docs-aws-resource-route|"
-description: |-
- Provides a resource to create a routing entry in a VPC routing table.
----
-
-# aws\_route
-
-Provides a resource to create a routing table entry (a route) in a VPC routing table.
-
-~> **NOTE on Route Tables and Routes:** Terraform currently
-provides both a standalone Route resource and a [Route Table](route_table.html) resource with routes
-defined in-line. At this time you cannot use a Route Table with in-line routes
-in conjunction with any Route resources. Doing so will cause
-a conflict of rule settings and will overwrite rules.
-
-## Example usage:
-
-```hcl
-resource "aws_route" "r" {
- route_table_id = "rtb-4fbb3ac4"
- destination_cidr_block = "10.0.1.0/22"
- vpc_peering_connection_id = "pcx-45ff3dc1"
- depends_on = ["aws_route_table.testing"]
-}
-```
-
-## Example IPv6 Usage:
-
-```hcl
-resource "aws_vpc" "vpc" {
- cidr_block = "10.1.0.0/16"
- assign_generated_ipv6_cidr_block = true
-}
-
-resource "aws_egress_only_internet_gateway" "egress" {
- vpc_id = "${aws_vpc.vpc.id}"
-}
-
-resource "aws_route" "r" {
- route_table_id = "rtb-4fbb3ac4"
- destination_ipv6_cidr_block = "::/0"
- egress_only_gateway_id = "${aws_egress_only_internet_gateway.egress.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `route_table_id` - (Required) The ID of the routing table.
-* `destination_cidr_block` - (Optional) The destination CIDR block.
-* `destination_ipv6_cidr_block` - (Optional) The destination IPv6 CIDR block.
-* `vpc_peering_connection_id` - (Optional) An ID of a VPC peering connection.
-* `egress_only_gateway_id` - (Optional) An ID of a VPC Egress Only Internet Gateway.
-* `gateway_id` - (Optional) An ID of a VPC internet gateway or a virtual private gateway.
-* `nat_gateway_id` - (Optional) An ID of a VPC NAT gateway.
-* `instance_id` - (Optional) An ID of an EC2 instance.
-* `network_interface_id` - (Optional) An ID of a network interface.
-
-Each route must contain either a `gateway_id`, an `egress_only_gateway_id`, a `nat_gateway_id`, an
-`instance_id`, a `vpc_peering_connection_id`, or a `network_interface_id`.
-Note that the default route, mapping the VPC's CIDR block to "local", is
-created implicitly and cannot be specified.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-~> **NOTE:** Only the target type that is specified (one of the above)
-will be exported as an attribute once the resource is created.
-
-* `route_table_id` - The ID of the routing table.
-* `destination_cidr_block` - The destination CIDR block.
-* `destination_ipv6_cidr_block` - The destination IPv6 CIDR block.
-* `vpc_peering_connection_id` - An ID of a VPC peering connection.
-* `egress_only_gateway_id` - An ID of a VPC Egress Only Internet Gateway.
-* `gateway_id` - An ID of a VPC internet gateway or a virtual private gateway.
-* `nat_gateway_id` - An ID of a VPC NAT gateway.
-* `instance_id` - An ID of a NAT instance.
-* `network_interface_id` - An ID of a network interface.
diff --git a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown b/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown
deleted file mode 100644
index 12f665a5c..000000000
--- a/website/source/docs/providers/aws/r/route53_delegation_set.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route53_delegation_set"
-sidebar_current: "docs-aws-resource-route53-delegation-set"
-description: |-
- Provides a Route53 Delegation Set resource.
----
-
-# aws\_route53\_delegation_set
-
-Provides a [Route53 Delegation Set](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html) resource.
-
-## Example Usage
-
-```hcl
-resource "aws_route53_delegation_set" "main" {
- reference_name = "DynDNS"
-}
-
-resource "aws_route53_zone" "primary" {
- name = "hashicorp.com"
- delegation_set_id = "${aws_route53_delegation_set.main.id}"
-}
-
-resource "aws_route53_zone" "secondary" {
- name = "terraform.io"
- delegation_set_id = "${aws_route53_delegation_set.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `reference_name` - (Optional) This is a reference name used in Caller Reference
- (helpful for identifying single delegation set amongst others)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The delegation set ID
-* `name_servers` - A list of authoritative name servers for the hosted zone
- (effectively a list of NS records).
-
-
-
-## Import
-
-Route53 Delegation Sets can be imported using the `delegation set id`, e.g.
-
-```
-$ terraform import aws_route53_delegation_set.set1 N1PA6795SAMPLE
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/route53_health_check.html.markdown b/website/source/docs/providers/aws/r/route53_health_check.html.markdown
deleted file mode 100644
index 03d6bef1d..000000000
--- a/website/source/docs/providers/aws/r/route53_health_check.html.markdown
+++ /dev/null
@@ -1,96 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route53_health_check"
-sidebar_current: "docs-aws-resource-route53-health-check"
-description: |-
- Provides a Route53 health check.
----
-# aws\_route53\_health\_check
-
-Provides a Route53 health check.
-
-## Example Usage
-
-```hcl
-resource "aws_route53_health_check" "child1" {
- fqdn = "foobar.terraform.com"
- port = 80
- type = "HTTP"
- resource_path = "/"
- failure_threshold = "5"
- request_interval = "30"
-
- tags = {
- Name = "tf-test-health-check"
- }
-}
-
-resource "aws_route53_health_check" "foo" {
- type = "CALCULATED"
- child_health_threshold = 1
- child_healthchecks = ["${aws_route53_health_check.child1.id}"]
-
- tags = {
- Name = "tf-test-calculated-health-check"
- }
-}
-```
-
-## CloudWatch Alarm Example
-
-```hcl
-resource "aws_cloudwatch_metric_alarm" "foobar" {
- alarm_name = "terraform-test-foobar5"
- comparison_operator = "GreaterThanOrEqualToThreshold"
- evaluation_periods = "2"
- metric_name = "CPUUtilization"
- namespace = "AWS/EC2"
- period = "120"
- statistic = "Average"
- threshold = "80"
- alarm_description = "This metric monitors ec2 cpu utilization"
-}
-
-resource "aws_route53_health_check" "foo" {
- type = "CLOUDWATCH_METRIC"
- cloudwatch_alarm_name = "${aws_cloudwatch_metric_alarm.foobar.alarm_name}"
- cloudwatch_alarm_region = "us-west-2"
- insufficient_data_health_status = "Healthy"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `reference_name` - (Optional) This is a reference name used in Caller Reference
- (helpful for identifying single health_check set amongst others)
-* `fqdn` - (Optional) The fully qualified domain name of the endpoint to be checked.
-* `ip_address` - (Optional) The IP address of the endpoint to be checked.
-* `port` - (Optional) The port of the endpoint to be checked.
-* `type` - (Required) The protocol to use when performing health checks. Valid values are `HTTP`, `HTTPS`, `HTTP_STR_MATCH`, `HTTPS_STR_MATCH`, `TCP`, `CALCULATED` and `CLOUDWATCH_METRIC`.
-* `failure_threshold` - (Required) The number of consecutive health checks that an endpoint must pass or fail.
-* `request_interval` - (Required) The number of seconds between the time that Amazon Route 53 gets a response from your endpoint and the time that it sends the next health-check request.
-* `resource_path` - (Optional) The path that you want Amazon Route 53 to request when performing health checks.
-* `search_string` - (Optional) String searched in the first 5120 bytes of the response body for check to be considered healthy.
-* `measure_latency` - (Optional) A Boolean value that indicates whether you want Route 53 to measure the latency between health checkers in multiple AWS regions and your endpoint and to display CloudWatch latency graphs in the Route 53 console.
-* `invert_healthcheck` - (Optional) A boolean value that indicates whether the status of health check should be inverted. For example, if a health check is healthy but Inverted is True , then Route 53 considers the health check to be unhealthy.
-* `enable_sni` - (Optional) A boolean value that indicates whether Route53 should send the `fqdn` to the endpoint when performing the health check. This defaults to AWS' defaults: when the `type` is "HTTPS" `enable_sni` defaults to `true`, when `type` is anything else `enable_sni` defaults to `false`.
-* `child_healthchecks` - (Optional) For a specified parent health check, a list of HealthCheckId values for the associated child health checks.
-* `child_health_threshold` - (Optional) The minimum number of child health checks that must be healthy for Route 53 to consider the parent health check to be healthy. Valid values are integers between 0 and 256, inclusive
-* `cloudwatch_alarm_name` - (Optional) The name of the CloudWatch alarm.
-* `cloudwatch_alarm_region` - (Optional) The CloudWatchRegion that the CloudWatch alarm was created in.
-* `insufficient_data_health_status` - (Optional) The status of the health check when CloudWatch has insufficient data about the state of associated alarm. Valid values are `Healthy` , `Unhealthy` and `LastKnownStatus`.
-
-* `tags` - (Optional) A mapping of tags to assign to the health check.
-
-At least one of either `fqdn` or `ip_address` must be specified.
-
-
-## Import
-
-Route53 Health Checks can be imported using the `health check id`, e.g.
-
-```
-$ terraform import aws_route53_health_check.http_check abcdef11-2222-3333-4444-555555fedcba
-```
diff --git a/website/source/docs/providers/aws/r/route53_record.html.markdown b/website/source/docs/providers/aws/r/route53_record.html.markdown
deleted file mode 100644
index 0f90cdec6..000000000
--- a/website/source/docs/providers/aws/r/route53_record.html.markdown
+++ /dev/null
@@ -1,159 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route53_record"
-sidebar_current: "docs-aws-resource-route53-record"
-description: |-
- Provides a Route53 record resource.
----
-
-# aws\_route53\_record
-
-Provides a Route53 record resource.
-
-## Example Usage
-
-### Simple routing policy
-
-```hcl
-resource "aws_route53_record" "www" {
- zone_id = "${aws_route53_zone.primary.zone_id}"
- name = "www.example.com"
- type = "A"
- ttl = "300"
- records = ["${aws_eip.lb.public_ip}"]
-}
-```
-
-### Weighted routing policy
-Other routing policies are configured similarly. See [AWS Route53 Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html) for details.
-
-```hcl
-resource "aws_route53_record" "www-dev" {
- zone_id = "${aws_route53_zone.primary.zone_id}"
- name = "www"
- type = "CNAME"
- ttl = "5"
-
- weighted_routing_policy {
- weight = 10
- }
-
- set_identifier = "dev"
- records = ["dev.example.com"]
-}
-
-resource "aws_route53_record" "www-live" {
- zone_id = "${aws_route53_zone.primary.zone_id}"
- name = "www"
- type = "CNAME"
- ttl = "5"
-
- weighted_routing_policy {
- weight = 90
- }
-
- set_identifier = "live"
- records = ["live.example.com"]
-}
-```
-
-### Alias record
-See [related part of AWS Route53 Developer Guide](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-choosing-alias-non-alias.html)
-to understand differences between alias and non-alias records.
-
-TTL for all alias records is [60 seconds](https://aws.amazon.com/route53/faqs/#dns_failover_do_i_need_to_adjust),
-you cannot change this, therefore `ttl` has to be omitted in alias records.
-
-```hcl
-resource "aws_elb" "main" {
- name = "foobar-terraform-elb"
- availability_zones = ["us-east-1c"]
-
- listener {
- instance_port = 80
- instance_protocol = "http"
- lb_port = 80
- lb_protocol = "http"
- }
-}
-
-resource "aws_route53_record" "www" {
- zone_id = "${aws_route53_zone.primary.zone_id}"
- name = "example.com"
- type = "A"
-
- alias {
- name = "${aws_elb.main.dns_name}"
- zone_id = "${aws_elb.main.zone_id}"
- evaluate_target_health = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone_id` - (Required) The ID of the hosted zone to contain this record.
-* `name` - (Required) The name of the record.
-* `type` - (Required) The record type.
-* `ttl` - (Required for non-alias records) The TTL of the record.
-* `records` - (Required for non-alias records) A string list of records.
-* `set_identifier` - (Optional) Unique identifier to differentiate records with routing policies from one another. Required if using `failover`, `geolocation`, `latency`, or `weighted` routing policies documented below.
-* `health_check_id` - (Optional) The health check the record should be associated with.
-* `alias` - (Optional) An alias block. Conflicts with `ttl` & `records`.
- Alias record documented below.
-* `failover_routing_policy` - (Optional) A block indicating the routing behavior when associated health check fails. Conflicts with any other routing policy. Documented below.
-* `geolocation_routing_policy` - (Optional) A block indicating a routing policy based on the geolocation of the requestor. Conflicts with any other routing policy. Documented below.
-* `latency_routing_policy` - (Optional) A block indicating a routing policy based on the latency between the requestor and an AWS region. Conflicts with any other routing policy. Documented below.
-* `weighted_routing_policy` - (Optional) A block indicating a weighted routing policy. Conflicts with any other routing policy. Documented below.
-
-Exactly one of `records` or `alias` must be specified: this determines whether it's an alias record.
-
-Alias records support the following:
-
-* `name` - (Required) DNS domain name for a CloudFront distribution, S3 bucket, ELB, or another resource record set in this hosted zone.
-* `zone_id` - (Required) Hosted zone ID for a CloudFront distribution, S3 bucket, ELB, or Route 53 hosted zone. See [`resource_elb.zone_id`](/docs/providers/aws/r/elb.html#zone_id) for example.
-* `evaluate_target_health` - (Required) Set to `true` if you want Route 53 to determine whether to respond to DNS queries using this resource record set by checking the health of the resource record set. Some resources have special requirements, see [related part of documentation](https://docs.aws.amazon.com/Route53/latest/DeveloperGuide/resource-record-sets-values.html#rrsets-values-alias-evaluate-target-health).
-
-Failover routing policies support the following:
-
-* `type` - (Required) `PRIMARY` or `SECONDARY`. A `PRIMARY` record will be served if its healthcheck is passing, otherwise the `SECONDARY` will be served. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/dns-failover-configuring-options.html#dns-failover-failover-rrsets
-
-Geolocation routing policies support the following:
-
-* `continent` - A two-letter continent code. See http://docs.aws.amazon.com/Route53/latest/APIReference/API_GetGeoLocation.html for code details. Either `continent` or `country` must be specified.
-* `country` - A two-character country code or `*` to indicate a default resource record set.
-* `subdivision` - (Optional) A subdivision code for a country.
-
-Latency routing policies support the following:
-
-* `region` - (Required) An AWS region from which to measure latency. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-latency
-
-Weighted routing policies support the following:
-
-* `weight` - (Required) A numeric value indicating the relative weight of the record. See http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/routing-policy.html#routing-policy-weighted.
-
-## Attributes Reference
-
-* `fqdn` - [FQDN](https://en.wikipedia.org/wiki/Fully_qualified_domain_name) built using the zone domain and `name`
-
-
-## Import
-
-Route53 Records can be imported using ID of the record. The ID is made up as ZONEID_RECORDNAME_TYPE_SET-IDENTIFIER
-
-e.g.
-
-```
-Z4KAPRWWNC7JR_dev.example.com_NS_dev
-```
-
-In this example, `Z4KAPRWWNC7JR` is the ZoneID, `dev.example.com` is the Record Name, `NS` is the Type and `dev` is the Set Identifier.
-Only the Set Identifier is actually optional in the ID.
-
-To import the ID above, it would look as follows:
-
-```
-$ terraform import aws_route53_record.myrecord Z4KAPRWWNC7JR_dev.example.com_NS_dev
-```
diff --git a/website/source/docs/providers/aws/r/route53_zone.html.markdown b/website/source/docs/providers/aws/r/route53_zone.html.markdown
deleted file mode 100644
index bab319f2e..000000000
--- a/website/source/docs/providers/aws/r/route53_zone.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route53_zone"
-sidebar_current: "docs-aws-resource-route53-zone"
-description: |-
- Provides a Route53 Hosted Zone resource.
----
-
-# aws\_route53\_zone
-
-Provides a Route53 Hosted Zone resource.
-
-## Example Usage
-
-```hcl
-resource "aws_route53_zone" "primary" {
- name = "example.com"
-}
-```
-
-For use in subdomains, note that you need to create a
-`aws_route53_record` of type `NS` as well as the subdomain
-zone.
-
-```hcl
-resource "aws_route53_zone" "main" {
- name = "example.com"
-}
-
-resource "aws_route53_zone" "dev" {
- name = "dev.example.com"
-
- tags {
- Environment = "dev"
- }
-}
-
-resource "aws_route53_record" "dev-ns" {
- zone_id = "${aws_route53_zone.main.zone_id}"
- name = "dev.example.com"
- type = "NS"
- ttl = "30"
-
- records = [
- "${aws_route53_zone.dev.name_servers.0}",
- "${aws_route53_zone.dev.name_servers.1}",
- "${aws_route53_zone.dev.name_servers.2}",
- "${aws_route53_zone.dev.name_servers.3}",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) This is the name of the hosted zone.
-* `comment` - (Optional) A comment for the hosted zone. Defaults to 'Managed by Terraform'.
-* `tags` - (Optional) A mapping of tags to assign to the zone.
-* `vpc_id` - (Optional) The VPC to associate with a private hosted zone. Specifying `vpc_id` will create a private hosted zone.
- Conflicts w/ `delegation_set_id` as delegation sets can only be used for public zones.
-* `vpc_region` - (Optional) The VPC's region. Defaults to the region of the AWS provider.
-* `delegation_set_id` - (Optional) The ID of the reusable delegation set whose NS records you want to assign to the hosted zone.
- Conflicts w/ `vpc_id` as delegation sets can only be used for public zones.
-* `force_destroy` - (Optional) Whether to destroy all records (possibly managed outside of Terraform)
- in the zone when destroying the zone.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `zone_id` - The Hosted Zone ID. This can be referenced by zone records.
-* `name_servers` - A list of name servers in associated (or default) delegation set.
- Find more about delegation sets in [AWS docs](https://docs.aws.amazon.com/Route53/latest/APIReference/actions-on-reusable-delegation-sets.html).
-
-
-## Import
-
-Route53 Zones can be imported using the `zone id`, e.g.
-
-```
-$ terraform import aws_route53_zone.myzone Z1D633PJN98FT9
-```
diff --git a/website/source/docs/providers/aws/r/route53_zone_association.html.markdown b/website/source/docs/providers/aws/r/route53_zone_association.html.markdown
deleted file mode 100644
index 9136e3d2a..000000000
--- a/website/source/docs/providers/aws/r/route53_zone_association.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route53_zone_association"
-sidebar_current: "docs-aws-resource-route53-zone-association"
-description: |-
- Provides a Route53 private Hosted Zone to VPC association resource.
----
-
-# aws\_route53\_zone\_association
-
-Provides a Route53 private Hosted Zone to VPC association resource.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "primary" {
- cidr_block = "10.6.0.0/16"
- enable_dns_hostnames = true
- enable_dns_support = true
-}
-
-resource "aws_vpc" "secondary" {
- cidr_block = "10.7.0.0/16"
- enable_dns_hostnames = true
- enable_dns_support = true
-}
-
-resource "aws_route53_zone" "example" {
- name = "example.com"
- vpc_id = "${aws_vpc.primary.id}"
-}
-
-resource "aws_route53_zone_association" "secondary" {
- zone_id = "${aws_route53_zone.example.zone_id}"
- vpc_id = "${aws_vpc.secondary.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone_id` - (Required) The private hosted zone to associate.
-* `vpc_id` - (Required) The VPC to associate with the private hosted zone.
-* `vpc_region` - (Optional) The VPC's region. Defaults to the region of the AWS provider.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The calculated unique identifier for the association.
-* `zone_id` - The ID of the hosted zone for the association.
-* `vpc_id` - The ID of the VPC for the association.
-* `vpc_region` - The region in which the VPC identified by `vpc_id` was created.
diff --git a/website/source/docs/providers/aws/r/route_table.html.markdown b/website/source/docs/providers/aws/r/route_table.html.markdown
deleted file mode 100644
index dadc8e423..000000000
--- a/website/source/docs/providers/aws/r/route_table.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route_table"
-sidebar_current: "docs-aws-resource-route-table|"
-description: |-
- Provides a resource to create a VPC routing table.
----
-
-# aws\_route\_table
-
-Provides a resource to create a VPC routing table.
-
-~> **NOTE on Route Tables and Routes:** Terraform currently
-provides both a standalone [Route resource](route.html) and a Route Table resource with routes
-defined in-line. At this time you cannot use a Route Table with in-line routes
-in conjunction with any Route resources. Doing so will cause
-a conflict of rule settings and will overwrite rules.
-
-~> **NOTE on `gateway_id` and `nat_gateway_id`:** The AWS API is very forgiving with these two
-attributes and the `aws_route_table` resource can be created with a NAT ID specified as a Gateway ID attribute.
-This _will_ lead to a permanent diff between your configuration and statefile, as the API returns the correct
-parameters in the returned route table. If you're experiencing constant diffs in your `aws_route_table` resources,
-the first thing to check is whether or not you're specifying a NAT ID instead of a Gateway ID, or vice-versa.
-
-~> **NOTE on `propagating_vgws` and the `aws_vpn_gateway_route_propagation` resource:**
-If the `propagating_vgws` argument is present, it's not supported to _also_
-define route propagations using `aws_vpn_gateway_route_propagation`, since
-this resource will delete any propagating gateways not explicitly listed in
-`propagating_vgws`. Omit this argument when defining route propagation using
-the separate resource.
-
-## Example usage with tags:
-
-```hcl
-resource "aws_route_table" "r" {
- vpc_id = "${aws_vpc.default.id}"
-
- route {
- cidr_block = "10.0.1.0/24"
- gateway_id = "${aws_internet_gateway.main.id}"
- }
-
- route {
- ipv6_cidr_block = "::/0"
- egress_only_gateway_id = "${aws_egress_only_internet_gateway.foo.id}"
- }
-
- tags {
- Name = "main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The VPC ID.
-* `route` - (Optional) A list of route objects. Their keys are documented below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `propagating_vgws` - (Optional) A list of virtual gateways for propagation.
-
-Each route supports the following:
-
-* `cidr_block` - (Optional) The CIDR block of the route.
-* `ipv6_cidr_block` - (Optional) The IPv6 CIDR block of the route.
-* `egress_only_gateway_id` - (Optional) The Egress Only Internet Gateway ID.
-* `gateway_id` - (Optional) The Internet Gateway ID.
-* `nat_gateway_id` - (Optional) The NAT Gateway ID.
-* `instance_id` - (Optional) The EC2 instance ID.
-* `vpc_peering_connection_id` - (Optional) The VPC Peering ID.
-* `network_interface_id` - (Optional) The ID of the elastic network interface (eni) to use.
-
-Each route must contain either a `gateway_id`, an `instance_id`, a `nat_gateway_id`, a
-`vpc_peering_connection_id` or a `network_interface_id`. Note that the default route, mapping
-the VPC's CIDR block to "local", is created implicitly and cannot be specified.
-
-## Attributes Reference
-
-The following attributes are exported:
-~> **NOTE:** Only the target that is entered is exported as a readable
-attribute once the route resource is created.
-
-* `id` - The ID of the routing table
-
-## Import
-
-Route Tables can be imported using the `route table id`, e.g.
-
-```
-$ terraform import aws_route_table.public_rt rtb-22574640
-```
diff --git a/website/source/docs/providers/aws/r/route_table_association.html.markdown b/website/source/docs/providers/aws/r/route_table_association.html.markdown
deleted file mode 100644
index 149b6c54e..000000000
--- a/website/source/docs/providers/aws/r/route_table_association.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_route_table_association"
-sidebar_current: "docs-aws-resource-route-table-association"
-description: |-
- Provides a resource to create an association between a subnet and routing table.
----
-
-# aws\_route\_table\_association
-
-Provides a resource to create an association between a subnet and routing table.
-
-## Example Usage
-
-```hcl
-resource "aws_route_table_association" "a" {
- subnet_id = "${aws_subnet.foo.id}"
- route_table_id = "${aws_route_table.bar.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `subnet_id` - (Required) The subnet ID to create an association.
-* `route_table_id` - (Required) The ID of the routing table to associate with.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the association
-
diff --git a/website/source/docs/providers/aws/r/s3_bucket.html.markdown b/website/source/docs/providers/aws/r/s3_bucket.html.markdown
deleted file mode 100644
index 2236b37cb..000000000
--- a/website/source/docs/providers/aws/r/s3_bucket.html.markdown
+++ /dev/null
@@ -1,406 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_s3_bucket"
-sidebar_current: "docs-aws-resource-s3-bucket"
-description: |-
- Provides a S3 bucket resource.
----
-
-# aws\_s3\_bucket
-
-Provides a S3 bucket resource.
-
-## Example Usage
-
-### Private Bucket w/ Tags
-
-```hcl
-resource "aws_s3_bucket" "b" {
- bucket = "my_tf_test_bucket"
- acl = "private"
-
- tags {
- Name = "My bucket"
- Environment = "Dev"
- }
-}
-```
-
-### Static Website Hosting
-
-```hcl
-resource "aws_s3_bucket" "b" {
- bucket = "s3-website-test.hashicorp.com"
- acl = "public-read"
- policy = "${file("policy.json")}"
-
- website {
- index_document = "index.html"
- error_document = "error.html"
-
- routing_rules = < **NOTE:** You cannot use `acceleration_status` in `cn-north-1` or `us-gov-west-1`
-
-The `website` object supports the following:
-
-* `index_document` - (Required, unless using `redirect_all_requests_to`) Amazon S3 returns this index document when requests are made to the root domain or any of the subfolders.
-* `error_document` - (Optional) An absolute path to the document to return in case of a 4XX error.
-* `redirect_all_requests_to` - (Optional) A hostname to redirect all website requests for this bucket to. Hostname can optionally be prefixed with a protocol (`http://` or `https://`) to use when redirecting requests. The default is the protocol that is used in the original request.
-* `routing_rules` - (Optional) A json array containing [routing rules](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-s3-websiteconfiguration-routingrules.html)
-describing redirect behavior and when redirects are applied.
-
-The `CORS` object supports the following:
-
-* `allowed_headers` (Optional) Specifies which headers are allowed.
-* `allowed_methods` (Required) Specifies which methods are allowed. Can be `GET`, `PUT`, `POST`, `DELETE` or `HEAD`.
-* `allowed_origins` (Required) Specifies which origins are allowed.
-* `expose_headers` (Optional) Specifies expose header in the response.
-* `max_age_seconds` (Optional) Specifies time in seconds that browser can cache the response for a preflight request.
-
-The `versioning` object supports the following:
-
-* `enabled` - (Optional) Enable versioning. Once you version-enable a bucket, it can never return to an unversioned state. You can, however, suspend versioning on that bucket.
-* `mfa_delete` - (Optional) Enable MFA delete for either `Change the versioning state of your bucket` or `Permanently delete an object version`. Default is `false`.
-
-The `logging` object supports the following:
-
-* `target_bucket` - (Required) The name of the bucket that will receive the log objects.
-* `target_prefix` - (Optional) To specify a key prefix for log objects.
-
-The `lifecycle_rule` object supports the following:
-
-* `id` - (Optional) Unique identifier for the rule.
-* `prefix` - (Required) Object key prefix identifying one or more objects to which the rule applies.
-* `enabled` - (Required) Specifies lifecycle rule status.
-* `abort_incomplete_multipart_upload_days` (Optional) Specifies the number of days after initiating a multipart upload when the multipart upload must be completed.
-* `expiration` - (Optional) Specifies a period in the object's expire (documented below).
-* `transition` - (Optional) Specifies a period in the object's transitions (documented below).
-* `noncurrent_version_expiration` - (Optional) Specifies when noncurrent object versions expire (documented below).
-* `noncurrent_version_transition` - (Optional) Specifies when noncurrent object versions transitions (documented below).
-
-At least one of `expiration`, `transition`, `noncurrent_version_expiration`, `noncurrent_version_transition` must be specified.
-
-The `expiration` object supports the following
-
-* `date` (Optional) Specifies the date after which you want the corresponding action to take effect.
-* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
-* `expired_object_delete_marker` (Optional) On a versioned bucket (versioning-enabled or versioning-suspended bucket), you can add this element in the lifecycle configuration to direct Amazon S3 to delete expired object delete markers.
-
-The `transition` object supports the following
-
-* `date` (Optional) Specifies the date after which you want the corresponding action to take effect.
-* `days` (Optional) Specifies the number of days after object creation when the specific rule action takes effect.
-* `storage_class` (Required) Specifies the Amazon S3 storage class to which you want the object to transition. Can be `STANDARD_IA` or `GLACIER`.
-
-The `noncurrent_version_expiration` object supports the following
-
-* `days` (Required) Specifies the number of days an object's noncurrent versions are retained before they expire.
-
-The `noncurrent_version_transition` object supports the following
-
-* `days` (Required) Specifies the number of days an object's noncurrent versions are retained before they transition to the specified storage class.
-* `storage_class` (Required) Specifies the Amazon S3 storage class to which you want the noncurrent versions object to transition. Can be `STANDARD_IA` or `GLACIER`.
-
-The `replication_configuration` object supports the following:
-
-* `role` - (Required) The ARN of the IAM role for Amazon S3 to assume when replicating the objects.
-* `rules` - (Required) Specifies the rules managing the replication (documented below).
-
-The `rules` object supports the following:
-
-* `id` - (Optional) Unique identifier for the rule.
-* `destination` - (Required) Specifies the destination for the rule (documented below).
-* `prefix` - (Required) Object keyname prefix identifying one or more objects to which the rule applies. Set as an empty string to replicate the whole bucket.
-* `status` - (Required) The status of the rule. Either `Enabled` or `Disabled`. The rule is ignored if status is not Enabled.
-
-The `destination` object supports the following:
-
-* `bucket` - (Required) The ARN of the S3 bucket where you want Amazon S3 to store replicas of the object identified by the rule.
-* `storage_class` - (Optional) The class of storage used to store the object.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The name of the bucket.
-* `arn` - The ARN of the bucket. Will be of format `arn:aws:s3:::bucketname`.
-* `bucket_domain_name` - The bucket domain name. Will be of format `bucketname.s3.amazonaws.com`.
-* `hosted_zone_id` - The [Route 53 Hosted Zone ID](https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints) for this bucket's region.
-* `region` - The AWS region this bucket resides in.
-* `website_endpoint` - The website endpoint, if the bucket is configured with a website. If not, this will be an empty string.
-* `website_domain` - The domain of the website endpoint, if the bucket is configured with a website. If not, this will be an empty string. This is used to create Route 53 alias records.
-
-## Import
-
-S3 bucket can be imported using the `bucket`, e.g.
-
-```
-$ terraform import aws_s3_bucket.bucket bucket-name
-```
diff --git a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown
deleted file mode 100644
index a3c4826b2..000000000
--- a/website/source/docs/providers/aws/r/s3_bucket_notification.html.markdown
+++ /dev/null
@@ -1,327 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_s3_bucket_notification"
-sidebar_current: "docs-aws-resource-s3-bucket-notification"
-description: |-
- Provides a S3 bucket notification resource.
----
-
-# aws\_s3\_bucket\_notification
-
-Provides a S3 bucket notification resource.
-
-## Example Usage
-
-### Add notification configuration to SNS Topic
-
-```hcl
-resource "aws_sns_topic" "topic" {
- name = "s3-event-notification-topic"
-
- policy = < **Note:** If you specify `content_encoding` you are responsible for encoding the body appropriately (i.e. `source` and `content` both expect already encoded/compressed bytes)
-
-The following arguments are supported:
-
-* `bucket` - (Required) The name of the bucket to put the file in.
-* `key` - (Required) The name of the object once it is in the bucket.
-* `source` - (Required) The path to the source file being uploaded to the bucket.
-* `content` - (Required unless `source` given) The literal content being uploaded to the bucket.
-* `acl` - (Optional) The [canned ACL](https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl) to apply. Defaults to "private".
-* `cache_control` - (Optional) Specifies caching behavior along the request/reply chain Read [w3c cache_control](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9) for further details.
-* `content_disposition` - (Optional) Specifies presentational information for the object. Read [wc3 content_disposition](http://www.w3.org/Protocols/rfc2616/rfc2616-sec19.html#sec19.5.1) for further information.
-* `content_encoding` - (Optional) Specifies what content encodings have been applied to the object and thus what decoding mechanisms must be applied to obtain the media-type referenced by the Content-Type header field. Read [w3c content encoding](http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11) for further information.
-* `content_language` - (Optional) The language the content is in e.g. en-US or en-GB.
-* `content_type` - (Optional) A standard MIME type describing the format of the object data, e.g. application/octet-stream. All Valid MIME Types are valid for this input.
-* `storage_class` - (Optional) Specifies the desired [Storage Class](http://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html)
-for the object. Can be either "`STANDARD`", "`REDUCED_REDUNDANCY`", or "`STANDARD_IA`". Defaults to "`STANDARD`".
-* `etag` - (Optional) Used to trigger updates. The only meaningful value is `${md5(file("path/to/file"))}`.
-This attribute is not compatible with `kms_key_id`.
-* `server_side_encryption` - (Optional) Specifies server-side encryption of the object in S3. Valid values are "`AES256`" and "`aws:kms`".
-* `kms_key_id` - (Optional) Specifies the AWS KMS Key ARN to use for object encryption.
-This value is a fully qualified **ARN** of the KMS Key. If using `aws_kms_key`,
-use the exported `arn` attribute:
- `kms_key_id = "${aws_kms_key.foo.arn}"`
-* `tags` - (Optional) A mapping of tags to assign to the object.
-
-Either `source` or `content` must be provided to specify the bucket content.
-These two arguments are mutually-exclusive.
-
-## Attributes Reference
-
-The following attributes are exported
-
-* `id` - the `key` of the resource supplied above
-* `etag` - the ETag generated for the object (an MD5 sum of the object content).
-* `version_id` - A unique version ID value for the object, if bucket versioning
-is enabled.
diff --git a/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown b/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown
deleted file mode 100644
index 250ac0439..000000000
--- a/website/source/docs/providers/aws/r/s3_bucket_policy.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_s3_bucket_policy"
-sidebar_current: "docs-aws-resource-s3-bucket-policy"
-description: |-
- Attaches a policy to an S3 bucket resource.
----
-
-# aws\_s3\_bucket\_policy
-
-Attaches a policy to an S3 bucket resource.
-
-## Example Usage
-
-### Using versioning
-
-```hcl
-resource "aws_s3_bucket" "b" {
- # Arguments
-}
-
-data "aws_iam_policy_document" "b" {
- # Policy statements
-}
-
-resource "aws_s3_bucket_policy" "b" {
- bucket = "${aws_s3_bucket.b.id}"
- policy = "${data.aws_iam_policy_document.b.json}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `bucket` - (Required) The name of the bucket to which to apply the policy.
-* `policy` - (Required) The text of the policy.
diff --git a/website/source/docs/providers/aws/r/security_group.html.markdown b/website/source/docs/providers/aws/r/security_group.html.markdown
deleted file mode 100644
index 5b21907be..000000000
--- a/website/source/docs/providers/aws/r/security_group.html.markdown
+++ /dev/null
@@ -1,167 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_security_group"
-sidebar_current: "docs-aws-resource-security-group"
-description: |-
- Provides a security group resource.
----
-
-# aws\_security\_group
-
-Provides a security group resource.
-
-~> **NOTE on Security Groups and Security Group Rules:** Terraform currently
-provides both a standalone [Security Group Rule resource](security_group_rule.html) (a single `ingress` or
-`egress` rule), and a Security Group resource with `ingress` and `egress` rules
-defined in-line. At this time you cannot use a Security Group with in-line rules
-in conjunction with any Security Group Rule resources. Doing so will cause
-a conflict of rule settings and will overwrite rules.
-
-## Example Usage
-
-Basic usage
-
-```hcl
-resource "aws_security_group" "allow_all" {
- name = "allow_all"
- description = "Allow all inbound traffic"
-
- ingress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- prefix_list_ids = ["pl-12c4e678"]
- }
-}
-```
-
-Basic usage with tags:
-
-```hcl
-resource "aws_security_group" "allow_all" {
- name = "allow_all"
- description = "Allow all inbound traffic"
-
- ingress {
- from_port = 0
- to_port = 65535
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
- }
-
- tags {
- Name = "allow_all"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, Forces new resource) The name of the security group. If omitted, Terraform will
-assign a random, unique name
-* `name_prefix` - (Optional, Forces new resource) Creates a unique name beginning with the specified
- prefix. Conflicts with `name`.
-* `description` - (Optional, Forces new resource) The security group description. Defaults to
- "Managed by Terraform". Cannot be "". __NOTE__: This field maps to the AWS
- `GroupDescription` attribute, for which there is no Update API. If you'd like
- to classify your security groups in a way that can be updated, use `tags`.
-* `ingress` - (Optional) Can be specified multiple times for each
- ingress rule. Each ingress block supports fields documented below.
-* `egress` - (Optional, VPC only) Can be specified multiple times for each
- egress rule. Each egress block supports fields documented below.
-* `vpc_id` - (Optional, Forces new resource) The VPC ID.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `ingress` block supports:
-
-* `cidr_blocks` - (Optional) List of CIDR blocks.
-* `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks.
-* `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp")
-* `protocol` - (Required) The protocol. If you select a protocol of
-"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
-* `security_groups` - (Optional) List of security group Group Names if using
- EC2-Classic, or Group IDs if using a VPC.
-* `self` - (Optional) If true, the security group itself will be added as
- a source to this ingress rule.
-* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").
-
-The `egress` block supports:
-
-* `cidr_blocks` - (Optional) List of CIDR blocks.
-* `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks.
-* `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints)
-* `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp")
-* `protocol` - (Required) The protocol. If you select a protocol of
-"-1" (semantically equivalent to `"all"`, which is not a valid value here), you must specify a "from_port" and "to_port" equal to 0. If not icmp, tcp, udp, or "-1" use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
-* `security_groups` - (Optional) List of security group Group Names if using
- EC2-Classic, or Group IDs if using a VPC.
-* `self` - (Optional) If true, the security group itself will be added as
- a source to this egress rule.
-* `to_port` - (Required) The end range port (or ICMP code if protocol is "icmp").
-
-~> **NOTE on Egress rules:** By default, AWS creates an `ALLOW ALL` egress rule when creating a
-new Security Group inside of a VPC. When creating a new Security
-Group inside a VPC, **Terraform will remove this default rule**, and require you
-specifically re-create it if you desire that rule. We feel this leads to fewer
-surprises in terms of controlling your egress rules. If you desire this rule to
-be in place, you can use this `egress` block:
-
-```hcl
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- cidr_blocks = ["0.0.0.0/0"]
- }
-```
-
-## Usage with prefix list IDs
-
-Prefix list IDs are managed by AWS internally. Prefix list IDs
-are associated with a prefix list name, or service name, that is linked to a specific region.
-Prefix list IDs are exported on VPC Endpoints, so you can use this format:
-
-```hcl
- # ...
- egress {
- from_port = 0
- to_port = 0
- protocol = "-1"
- prefix_list_ids = ["${aws_vpc_endpoint.my_endpoint.prefix_list_id}"]
- }
- # ...
- resource "aws_vpc_endpoint" "my_endpoint" {
- # ...
- }
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group
-* `vpc_id` - The VPC ID.
-* `owner_id` - The owner ID.
-* `name` - The name of the security group
-* `description` - The description of the security group
-* `ingress` - The ingress rules. See above for more.
-* `egress` - The egress rules. See above for more.
-
-
-## Import
-
-Security Groups can be imported using the `security group id`, e.g.
-
-```
-$ terraform import aws_security_group.elb_sg sg-903004f8
-```
diff --git a/website/source/docs/providers/aws/r/security_group_rule.html.markdown b/website/source/docs/providers/aws/r/security_group_rule.html.markdown
deleted file mode 100644
index bdf70c4b6..000000000
--- a/website/source/docs/providers/aws/r/security_group_rule.html.markdown
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_security_group_rule"
-sidebar_current: "docs-aws-resource-security-group-rule"
-description: |-
- Provides a security group rule resource.
----
-
-# aws\_security\_group\_rule
-
-Provides a security group rule resource. Represents a single `ingress` or
-`egress` group rule, which can be added to external Security Groups.
-
-~> **NOTE on Security Groups and Security Group Rules:** Terraform currently
-provides both a standalone Security Group Rule resource (a single `ingress` or
-`egress` rule), and a [Security Group resource](security_group.html) with `ingress` and `egress` rules
-defined in-line. At this time you cannot use a Security Group with in-line rules
-in conjunction with any Security Group Rule resources. Doing so will cause
-a conflict of rule settings and will overwrite rules.
-
-## Example Usage
-
-Basic usage
-
-```hcl
-resource "aws_security_group_rule" "allow_all" {
- type = "ingress"
- from_port = 0
- to_port = 65535
- protocol = "tcp"
- cidr_blocks = ["0.0.0.0/0"]
- prefix_list_ids = ["pl-12c4e678"]
-
- security_group_id = "sg-123456"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The type of rule being created. Valid options are `ingress` (inbound)
-or `egress` (outbound).
-* `cidr_blocks` - (Optional) List of CIDR blocks. Cannot be specified with `source_security_group_id`.
-* `ipv6_cidr_blocks` - (Optional) List of IPv6 CIDR blocks.
-* `prefix_list_ids` - (Optional) List of prefix list IDs (for allowing access to VPC endpoints).
-Only valid with `egress`.
-* `from_port` - (Required) The start port (or ICMP type number if protocol is "icmp").
-* `protocol` - (Required) The protocol. If not icmp, tcp, udp, or all use the [protocol number](https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xhtml)
-* `security_group_id` - (Required) The security group to apply this rule to.
-* `source_security_group_id` - (Optional) The security group id to allow access to/from,
- depending on the `type`. Cannot be specified with `cidr_blocks`.
-* `self` - (Optional) If true, the security group itself will be added as
- a source to this ingress rule.
-* `to_port` - (Required) The end port (or ICMP code if protocol is "icmp").
-
-## Usage with prefix list IDs
-
-Prefix list IDs are managed by AWS internally. Prefix list IDs
-are associated with a prefix list name, or service name, that is linked to a specific region.
-Prefix list IDs are exported on VPC Endpoints, so you can use this format:
-
-```hcl
-resource "aws_security_group_rule" "allow_all" {
- type = "egress"
- to_port = 0
- protocol = "-1"
- prefix_list_ids = ["${aws_vpc_endpoint.my_endpoint.prefix_list_id}"]
- from_port = 0
- security_group_id = "sg-123456"
-}
-
-# ...
-resource "aws_vpc_endpoint" "my_endpoint" {
- # ...
-}
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group rule
-* `type` - The type of rule, `ingress` or `egress`
-* `from_port` - The start port (or ICMP type number if protocol is "icmp")
-* `to_port` - The end port (or ICMP code if protocol is "icmp")
-* `protocol` – The protocol used
diff --git a/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown
deleted file mode 100644
index 0a581443a..000000000
--- a/website/source/docs/providers/aws/r/ses_active_receipt_rule_set.html.markdown
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_active_receipt_rule_set"
-sidebar_current: "docs-aws-resource-ses-active-receipt-rule-set"
-description: |-
- Provides a resource to designate the active SES receipt rule set
----
-
-# aws\_ses\_active_receipt_rule_set
-
-Provides a resource to designate the active SES receipt rule set
-
-## Example Usage
-
-```hcl
-resource "aws_ses_active_receipt_rule_set" "main" {
- rule_set_name = "primary-rules"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rule_set_name` - (Required) The name of the rule set
diff --git a/website/source/docs/providers/aws/r/ses_configuration_set.markdown b/website/source/docs/providers/aws/r/ses_configuration_set.markdown
deleted file mode 100644
index 1b4a0bf3c..000000000
--- a/website/source/docs/providers/aws/r/ses_configuration_set.markdown
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_configuration_set"
-sidebar_current: "docs-aws-resource-ses-configuration-set"
-description: |-
- Provides an SES configuration set
----
-
-# aws\_ses\_configuration_set
-
-Provides an SES configuration set resource
-
-## Example Usage
-
-```hcl
-resource "aws_ses_configuration_set" "test" {
- name = "some-configuration-set-test"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the configuration set
diff --git a/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown b/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown
deleted file mode 100644
index 26f37bf1b..000000000
--- a/website/source/docs/providers/aws/r/ses_domain_identity.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_domain_identity"
-sidebar_current: "docs-aws-resource-ses-domain-identity"
-description: |-
- Provides an SES domain identity resource
----
-
-# aws\_ses\_domain_identity
-
-Provides an SES domain identity resource
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain` - (Required) The domain name to assign to SES
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `arn` - The ARN of the domain identity.
-
-* `verification_token` - A code which when added to the domain as a TXT record
- will signal to SES that the owner of the domain has authorised SES to act on
- their behalf. The domain identity will be in state "verification pending"
- until this is done. See below for an example of how this might be achieved
- when the domain is hosted in Route 53 and managed by Terraform. Find out
- more about verifying domains in Amazon SES in the [AWS SES
- docs](http://docs.aws.amazon.com/ses/latest/DeveloperGuide/verify-domains.html).
-
-## Example Usage
-
-```hcl
-resource "aws_ses_domain_identity" "example" {
- domain = "example.com"
-}
-
-resource "aws_route53_record" "example_amazonses_verification_record" {
- zone_id = "ABCDEFGHIJ123"
- name = "_amazonses.example.com"
- type = "TXT"
- ttl = "600"
- records = ["${aws_ses_domain_identity.example.verification_token}"]
-}
-```
-
diff --git a/website/source/docs/providers/aws/r/ses_event_destination.markdown b/website/source/docs/providers/aws/r/ses_event_destination.markdown
deleted file mode 100644
index 794953bc5..000000000
--- a/website/source/docs/providers/aws/r/ses_event_destination.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_event_destination"
-sidebar_current: "docs-aws-resource-ses-event-destination"
-description: |-
- Provides an SES event destination
----
-
-# aws\_ses\_event_destination
-
-Provides an SES event destination
-
-## Example Usage
-
-```hcl
-# Add a firehose event destination to a configuration set
-resource "aws_ses_event_destination" "kinesis" {
- name = "event-destination-kinesis"
- configuration_set_name = "${aws_ses_configuration_set.test.name}"
- enabled = true
- matching_types = ["bounce", "send"]
-
- kinesis_destination = {
- stream_arn = "${aws_kinesis_firehose_delivery_stream.test_stream.arn}"
- role_arn = "${aws_iam_role.firehose_role.arn}"
- }
-}
-
-# CloudWatch event destination
-resource "aws_ses_event_destination" "cloudwatch" {
- name = "event-destination-cloudwatch"
- configuration_set_name = "${aws_ses_configuration_set.test.name}"
- enabled = true
- matching_types = ["bounce", "send"]
-
- cloudwatch_destination = {
- default_value = "default"
- dimension_name = "dimension"
- value_source = "emailHeader"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the event destination
-* `configuration_set_name` - (Required) The name of the configuration set
-* `enabled` - (Optional) If true, the event destination will be enabled
-* `matching_types` - (Required) A list of matching types. May be any of `"send"`, `"reject"`, `"bounce"`, `"complaint"`, or `"delivery"`.
-* `cloudwatch_destination` - (Optional) CloudWatch destination for the events
-* `kinesis_destination` - (Optional) Send the events to a kinesis firehose destination
-
-~> **NOTE:** You can specify `"cloudwatch_destination"` or `"kinesis_destination"` but not both
-
-CloudWatch Destination requires the following:
-
-* `default_value` - (Required) The default value for the event
-* `dimension_name` - (Required) The name for the dimension
-* `value_source` - (Required) The source for the value. It can be either `"messageTag"` or `"emailHeader"`
-
-Kinesis Destination requires the following:
-
-* `stream_arn` - (Required) The ARN of the Kinesis Stream
-* `role_arn` - (Required) The ARN of the role that has permissions to access the Kinesis Stream
-
diff --git a/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown
deleted file mode 100644
index d45f710dc..000000000
--- a/website/source/docs/providers/aws/r/ses_receipt_filter.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_receipt_filter"
-sidebar_current: "docs-aws-resource-ses-receipt-filter"
-description: |-
- Provides an SES receipt filter
----
-
-# aws\_ses\_receipt_filter
-
-Provides an SES receipt filter resource
-
-## Example Usage
-
-```hcl
-resource "aws_ses_receipt_filter" "filter" {
- name = "block-spammer"
- cidr = "10.10.10.10"
- policy = "Block"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the filter
-* `cidr` - (Required) The IP address or address range to filter, in CIDR notation
-* `policy` - (Required) Block or Allow
diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown
deleted file mode 100644
index d08bf81ab..000000000
--- a/website/source/docs/providers/aws/r/ses_receipt_rule.html.markdown
+++ /dev/null
@@ -1,99 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_receipt_rule"
-sidebar_current: "docs-aws-resource-ses-receipt-rule"
-description: |-
- Provides an SES receipt rule resource
----
-
-# aws\_ses\_receipt_rule
-
-Provides an SES receipt rule resource
-
-## Example Usage
-
-```hcl
-# Add a header to the email and store it in S3
-resource "aws_ses_receipt_rule" "store" {
- name = "store"
- rule_set_name = "default-rule-set"
- recipients = ["karen@example.com"]
- enabled = true
- scan_enabled = true
-
- add_header_action {
- header_name = "Custom-Header"
- header_value = "Added by SES"
- }
-
- s3_action {
- bucket_name = "emails"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the rule
-* `rule_set_name` - (Required) The name of the rule set
-* `after` - (Optional) The name of the rule to place this rule after
-* `enabled` - (Optional) If true, the rule will be enabled
-* `recipients` - (Optional) A list of email addresses
-* `scan_enabled` - (Optional) If true, incoming emails will be scanned for spam and viruses
-* `tls_policy` - (Optional) Require or Optional
-* `add_header_action` - (Optional) A list of Add Header Action blocks. Documented below.
-* `bounce_action` - (Optional) A list of Bounce Action blocks. Documented below.
-* `lambda_action` - (Optional) A list of Lambda Action blocks. Documented below.
-* `s3_action` - (Optional) A list of S3 Action blocks. Documented below.
-* `sns_action` - (Optional) A list of SNS Action blocks. Documented below.
-* `stop_action` - (Optional) A list of Stop Action blocks. Documented below.
-* `workmail_action` - (Optional) A list of WorkMail Action blocks. Documented below.
-
-Add header actions support the following:
-
-* `header_name` - (Required) The name of the header to add
-* `header_value` - (Required) The value of the header to add
-* `position` - (Required) The position of the action in the receipt rule
-
-Bounce actions support the following:
-
-* `message` - (Required) The message to send
-* `sender` - (Required) The email address of the sender
-* `smtp_reply_code` - (Required) The RFC 5321 SMTP reply code
-* `status_code` - (Optional) The RFC 3463 SMTP enhanced status code
-* `topic_arn` - (Optional) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
-
-Lambda actions support the following:
-
-* `function_arn` - (Required) The ARN of the Lambda function to invoke
-* `invocation_type` - (Optional) Event or RequestResponse
-* `topic_arn` - (Optional) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
-
-S3 actions support the following:
-
-* `bucket_name` - (Required) The name of the S3 bucket
-* `kms_key_arn` - (Optional) The ARN of the KMS key
-* `object_key_prefix` - (Optional) The key prefix of the S3 bucket
-* `topic_arn` - (Optional) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
-
-SNS actions support the following:
-
-* `topic_arn` - (Required) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
-
-Stop actions support the following:
-
-* `scope` - (Required) The scope to apply
-* `topic_arn` - (Optional) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
-
-WorkMail actions support the following:
-
-* `organization_arn` - (Required) The ARN of the WorkMail organization
-* `topic_arn` - (Optional) The ARN of an SNS topic to notify
-* `position` - (Required) The position of the action in the receipt rule
diff --git a/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown b/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown
deleted file mode 100644
index f8ec307ed..000000000
--- a/website/source/docs/providers/aws/r/ses_receipt_rule_set.html.markdown
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: ses_receipt_rule_set"
-sidebar_current: "docs-aws-resource-ses-receipt-rule-set"
-description: |-
- Provides an SES receipt rule set resource
----
-
-# aws\_ses\_receipt_rule_set
-
-Provides an SES receipt rule set resource
-
-## Example Usage
-
-```hcl
-resource "aws_ses_receipt_rule_set" "main" {
- rule_set_name = "primary-rules"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rule_set_name` - (Required) The name of the rule set
diff --git a/website/source/docs/providers/aws/r/sfn_activity.html.markdown b/website/source/docs/providers/aws/r/sfn_activity.html.markdown
deleted file mode 100644
index e6e9407ec..000000000
--- a/website/source/docs/providers/aws/r/sfn_activity.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: sfn_activity"
-sidebar_current: "docs-aws-resource-sfn-activity"
-description: |-
- Provides a Step Function Activity resource.
----
-
-# sfn\_activity
-
-Provides a Step Function Activity resource
-
-## Example Usage
-
-```hcl
-resource "aws_sfn_activity" "sfn_activity" {
- name = "my-activity"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the activity to create.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Amazon Resource Name (ARN) that identifies the created activity.
-* `name` - The name of the activity.
-* `creation_date` - The date the activity was created.
-
-## Import
-
-Activities can be imported using the `arn`, e.g.
-
-```
-$ terraform import aws_sfn_activity.foo arn:aws:states:eu-west-1:123456789098:activity:bar
-```
diff --git a/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown b/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown
deleted file mode 100644
index e9bd9efbd..000000000
--- a/website/source/docs/providers/aws/r/sfn_state_machine.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: sfn_state_machine"
-sidebar_current: "docs-aws-resource-sfn-state-machine"
-description: |-
- Provides a Step Function State Machine resource.
----
-
-# sfn\_state\_machine
-
-Provides a Step Function State Machine resource
-
-## Example Usage
-
-```hcl
-# ...
-
-resource "aws_sfn_state_machine" "sfn_state_machine" {
- name = "my-state-machine"
- role_arn = "${aws_iam_role.iam_for_sfn.arn}"
-
- definition = < **NOTE:** If SNS topic and SQS queue are in different AWS regions it is important to place the "aws_sns_topic_subscription" into the terraform configuration of the region with the SQS queue. If "aws_sns_topic_subscription" is placed in the terraform configuration of the region with the SNS topic terraform will fail to create the subscription.
-
-~> **NOTE:** Setup of cross-account subscriptions from SNS topics to SQS queues requires Terraform to have access to BOTH accounts.
-
-~> **NOTE:** If SNS topic and SQS queue are in different AWS accounts but the same region it is important to place the "aws_sns_topic_subscription" into the terraform configuration of the account with the SQS queue. If "aws_sns_topic_subscription" is placed in the terraform configuration of the account with the SNS topic terraform creates the subscriptions but does not keep state and tries to re-create the subscription at every apply.
-
-~> **NOTE:** If SNS topic and SQS queue are in different AWS accounts and different AWS regions it is important to recognize that the subscription needs to be initiated from the account with the SQS queue but in the region of the SNS topic.
-
-## Example Usage
-
-You can directly supply a topic and ARN by hand in the `topic_arn` property along with the queue ARN:
-
-```hcl
-resource "aws_sns_topic_subscription" "user_updates_sqs_target" {
- topic_arn = "arn:aws:sns:us-west-2:432981146916:user-updates-topic"
- protocol = "sqs"
- endpoint = "arn:aws:sqs:us-west-2:432981146916:terraform-queue-too"
-}
-```
-
-Alternatively you can use the ARN properties of a managed SNS topic and SQS queue:
-
-```hcl
-resource "aws_sns_topic" "user_updates" {
- name = "user-updates-topic"
-}
-
-resource "aws_sqs_queue" "user_updates_queue" {
- name = "user-updates-queue"
-}
-
-resource "aws_sns_topic_subscription" "user_updates_sqs_target" {
- topic_arn = "${aws_sns_topic.user_updates.arn}"
- protocol = "sqs"
- endpoint = "${aws_sqs_queue.user_updates_queue.arn}"
-}
-```
-
-You can subscribe SNS topics to SQS queues in different Amazon accounts and regions:
-
-```hcl
-/*
-#
-# Variables
-#
-*/
-variable "sns" {
- default = {
- account-id = "111111111111"
- role-name = "service/service-hashicorp-terraform"
- name = "example-sns-topic"
- display_name = "example"
- region = "us-west-1"
- }
-}
-
-variable "sqs" {
- default = {
- account-id = "222222222222"
- role-name = "service/service-hashicorp-terraform"
- name = "example-sqs-queue"
- region = "us-east-1"
- }
-}
-
-data "aws_iam_policy_document" "sns-topic-policy" {
- policy_id = "__default_policy_ID"
-
- statement {
- actions = [
- "SNS:Subscribe",
- "SNS:SetTopicAttributes",
- "SNS:RemovePermission",
- "SNS:Receive",
- "SNS:Publish",
- "SNS:ListSubscriptionsByTopic",
- "SNS:GetTopicAttributes",
- "SNS:DeleteTopic",
- "SNS:AddPermission",
- ]
-
- condition {
- test = "StringEquals"
- variable = "AWS:SourceOwner"
-
- values = [
- "${var.sns["account-id"]}",
- ]
- }
-
- effect = "Allow"
-
- principals {
- type = "AWS"
- identifiers = ["*"]
- }
-
- resources = [
- "arn:aws:sns:${var.sns["region"]}:${var.sns["account-id"]}:${var.sns["name"]}",
- ]
-
- sid = "__default_statement_ID"
- }
-
- statement {
- actions = [
- "SNS:Subscribe",
- "SNS:Receive",
- ]
-
- condition {
- test = "StringLike"
- variable = "SNS:Endpoint"
-
- values = [
- "arn:aws:sqs:${var.sqs["region"]}:${var.sqs["account-id"]}:${var.sqs["name"]}",
- ]
- }
-
- effect = "Allow"
-
- principals {
- type = "AWS"
- identifiers = ["*"]
- }
-
- resources = [
- "arn:aws:sns:${var.sns["region"]}:${var.sns["account-id"]}:${var.sns["name"]}",
- ]
-
- sid = "__console_sub_0"
- }
-}
-
-data "aws_iam_policy_document" "sqs-queue-policy" {
- policy_id = "arn:aws:sqs:${var.sqs["region"]}:${var.sqs["account-id"]}:${var.sqs["name"]}/SQSDefaultPolicy"
-
- statement {
- sid = "example-sns-topic"
- effect = "Allow"
-
- principals {
- type = "AWS"
- identifiers = ["*"]
- }
-
- actions = [
- "SQS:SendMessage",
- ]
-
- resources = [
- "arn:aws:sqs:${var.sqs["region"]}:${var.sqs["account-id"]}:${var.sqs["name"]}",
- ]
-
- condition {
- test = "ArnEquals"
- variable = "aws:SourceArn"
-
- values = [
- "arn:aws:sns:${var.sns["region"]}:${var.sns["account-id"]}:${var.sns["name"]}",
- ]
- }
- }
-}
-
-# provider to manage SNS topics
-provider "aws" {
- alias = "sns"
- region = "${var.sns["region"]}"
-
- assume_role {
- role_arn = "arn:aws:iam::${var.sns["account-id"]}:role/${var.sns["role-name"]}"
- session_name = "sns-${var.sns["region"]}"
- }
-}
-
-# provider to manage SQS queues
-provider "aws" {
- alias = "sqs"
- region = "${var.sqs["region"]}"
-
- assume_role {
- role_arn = "arn:aws:iam::${var.sqs["account-id"]}:role/${var.sqs["role-name"]}"
- session_name = "sqs-${var.sqs["region"]}"
- }
-}
-
-# provider to subscribe SQS to SNS (using the SQS account but the SNS region)
-provider "aws" {
- alias = "sns2sqs"
- region = "${var.sns["region"]}"
-
- assume_role {
- role_arn = "arn:aws:iam::${var.sqs["account-id"]}:role/${var.sqs["role-name"]}"
- session_name = "sns2sqs-${var.sns["region"]}"
- }
-}
-
-resource "aws_sns_topic" "sns-topic" {
- provider = "aws.sns"
- name = "${var.sns["name"]}"
- display_name = "${var.sns["display_name"]}"
- policy = "${data.aws_iam_policy_document.sns-topic-policy.json}"
-}
-
-resource "aws_sqs_queue" "sqs-queue" {
- provider = "aws.sqs"
- name = "${var.sqs["name"]}"
- policy = "${data.aws_iam_policy_document.sqs-queue-policy.json}"
-}
-
-resource "aws_sns_topic_subscription" "sns-topic" {
- provider = "aws.sns2sqs"
- topic_arn = "${aws_sns_topic.sns-topic.arn}"
- protocol = "sqs"
- endpoint = "${aws_sqs_queue.sqs-queue.arn}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `topic_arn` - (Required) The ARN of the SNS topic to subscribe to
-* `protocol` - (Required) The protocol to use. The possible values for this are: `sqs`, `lambda`, `application`. (`http` or `https` are partially supported, see below) (`email`, `sms`, are options but unsupported, see below).
-* `endpoint` - (Required) The endpoint to send data to, the contents will vary with the protocol. (see below for more information)
-* `endpoint_auto_confirms` - (Optional) Boolean indicating whether the end point is capable of [auto confirming subscription](http://docs.aws.amazon.com/sns/latest/dg/SendMessageToHttp.html#SendMessageToHttp.prepare) e.g., PagerDuty (default is false)
-* `confirmation_timeout_in_minutes` - (Optional) Integer indicating number of minutes to wait in retying mode for fetching subscription arn before marking it as failure. Only applicable for http and https protocols (default is 1 minute).
-* `raw_message_delivery` - (Optional) Boolean indicating whether or not to enable raw message delivery (the original message is directly passed, not wrapped in JSON with the original message in the message property).
-
-### Protocols supported
-
-Supported SNS protocols include:
-
-* `lambda` -- delivery of JSON-encoded message to a lambda function
-* `sqs` -- delivery of JSON-encoded message to an Amazon SQS queue
-* `application` -- delivery of JSON-encoded message to an EndpointArn for a mobile app and device
-
-Partially supported SNS protocols include:
-
-* `http` -- delivery of JSON-encoded messages via HTTP. Supported only for the end points that auto confirms the subscription.
-* `https` -- delivery of JSON-encoded messages via HTTPS. Supported only for the end points that auto confirms the subscription.
-
-Unsupported protocols include the following:
-
-* `email` -- delivery of message via SMTP
-* `email-json` -- delivery of JSON-encoded message via SMTP
-* `sms` -- delivery text message
-
-These are unsupported because the endpoint needs to be authorized and does not
-generate an ARN until the target email address has been validated. This breaks
-the Terraform model and as a result are not currently supported.
-
-### Specifying endpoints
-
-Endpoints have different format requirements according to the protocol that is chosen.
-
-* SQS endpoints come in the form of the SQS queue's ARN (not the URL of the queue) e.g: `arn:aws:sqs:us-west-2:432981146916:terraform-queue-too`
-* Application endpoints are also the endpoint ARN for the mobile app and device.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ARN of the subscription
-* `topic_arn` - The ARN of the topic the subscription belongs to
-* `protocol` - The protocol being used
-* `endpoint` - The full endpoint to send data to (SQS ARN, HTTP(S) URL, Application ARN, SMS number, etc.)
-* `arn` - The ARN of the subscription stored as a more user-friendly property
-
-## Import
-
-SNS Topic Subscriptions can be imported using the `subscription arn`, e.g.
-
-```
-$ terraform import aws_sns_topic_subscription.user_updates_sqs_target arn:aws:sns:us-west-2:0123456789012:my-topic:8a21d249-4329-4871-acc6-7be709c6ea7f
-```
diff --git a/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown b/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown
deleted file mode 100644
index 71e2ec7d1..000000000
--- a/website/source/docs/providers/aws/r/spot_datafeed_subscription.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_spot_datafeed_subscription"
-sidebar_current: "docs-aws-resource-spot-datafeed-subscription"
-description: |-
- Provides a Spot Datafeed Subscription resource.
----
-
-# aws\_spot\_datafeed\_subscription
-
--> **Note:** There is only a single subscription allowed per account.
-
-To help you understand the charges for your Spot instances, Amazon EC2 provides a data feed that describes your Spot instance usage and pricing.
-This data feed is sent to an Amazon S3 bucket that you specify when you subscribe to the data feed.
-
-## Example Usage
-
-```hcl
-resource "aws_s3_bucket" "default" {
- bucket = "tf-spot-datafeed"
-}
-
-resource "aws_spot_datafeed_subscription" "default" {
- bucket = "${aws_s3_bucket.default.bucket}"
- prefix = "my_subdirectory"
-}
-```
-
-## Argument Reference
-* `bucket` - (Required) The Amazon S3 bucket in which to store the Spot instance data feed.
-* `prefix` - (Optional) Path of folder inside bucket to place spot pricing data.
-
-## Import
-
-A Spot Datafeed Subscription can be imported using the word `spot-datafeed-subscription`, e.g.
-
-```
-$ terraform import aws_spot_datafeed_subscription.mysubscription spot-datafeed-subscription
-```
diff --git a/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown b/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown
deleted file mode 100644
index 7b77ee199..000000000
--- a/website/source/docs/providers/aws/r/spot_fleet_request.html.markdown
+++ /dev/null
@@ -1,118 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_spot_fleet_request"
-sidebar_current: "docs-aws-resource-spot-fleet-request"
-description: |-
- Provides a Spot Fleet Request resource.
----
-
-# aws\_spot\_fleet\_request
-
-Provides an EC2 Spot Fleet Request resource. This allows a fleet of Spot
-instances to be requested on the Spot market.
-
-## Example Usage
-
-```hcl
-# Request a Spot fleet
-resource "aws_spot_fleet_request" "cheap_compute" {
- iam_fleet_role = "arn:aws:iam::12345678:role/spot-fleet"
- spot_price = "0.03"
- allocation_strategy = "diversified"
- target_capacity = 6
- valid_until = "2019-11-04T20:44:20Z"
-
- launch_specification {
- instance_type = "m4.10xlarge"
- ami = "ami-1234"
- spot_price = "2.793"
- placement_tenancy = "dedicated"
- }
-
- launch_specification {
- instance_type = "m4.4xlarge"
- ami = "ami-5678"
- key_name = "my-key"
- spot_price = "1.117"
- availability_zone = "us-west-1a"
- subnet_id = "subnet-1234"
- weighted_capacity = 35
-
- root_block_device {
- volume_size = "300"
- volume_type = "gp2"
- }
- }
-}
-```
-
-~> **NOTE:** Terraform does not support the functionality where multiple `subnet_id` or `availability_zone` parameters can be specified in the same
-launch configuration block. If you want to specify multiple values, then separate launch configuration blocks should be used:
-
-```hcl
-resource "aws_spot_fleet_request" "foo" {
- iam_fleet_role = "arn:aws:iam::12345678:role/spot-fleet"
- spot_price = "0.005"
- target_capacity = 2
- valid_until = "2019-11-04T20:44:20Z"
-
- launch_specification {
- instance_type = "m1.small"
- ami = "ami-d06a90b0"
- key_name = "my-key"
- availability_zone = "us-west-2a"
- }
-
- launch_specification {
- instance_type = "m3.large"
- ami = "ami-d06a90b0"
- key_name = "my-key"
- availability_zone = "us-west-2a"
- }
-
- depends_on = ["aws_iam_policy_attachment.test-attach"]
-}
-```
-
-## Argument Reference
-
-Most of these arguments directly correspond to the
-[official API](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetRequestConfigData.html).
-
-* `iam_fleet_role` - (Required) Grants the Spot fleet permission to terminate
- Spot instances on your behalf when you cancel its Spot fleet request using
-CancelSpotFleetRequests or when the Spot fleet request expires, if you set
-terminateInstancesWithExpiration.
-* `replace_unhealthy_instances` - (Optional) Indicates whether Spot fleet should replace unhealthy instances. Default `false`.
-* `launch_specification` - Used to define the launch configuration of the
- spot-fleet request. Can be specified multiple times to define different bids
-across different markets and instance types.
-
- **Note:** This takes in similar but not
- identical inputs as [`aws_instance`](instance.html). There are limitations on
- what you can specify (tags, for example, are not supported). See the
- list of officially supported inputs in the
- [reference documentation](http://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_SpotFleetLaunchSpecification.html). Any normal [`aws_instance`](instance.html) parameter that corresponds to those inputs may be used.
-
-* `spot_price` - (Required) The bid price per unit hour.
-* `target_capacity` - The number of units to request. You can choose to set the
- target capacity in terms of instances or a performance characteristic that is
-important to your application workload, such as vCPUs, memory, or I/O.
-* `allocation_strategy` - Indicates how to allocate the target capacity across
- the Spot pools specified by the Spot fleet request. The default is
-lowestPrice.
-* `excess_capacity_termination_policy` - Indicates whether running Spot
- instances should be terminated if the target capacity of the Spot fleet
- request is decreased below the current size of the Spot fleet.
-* `terminate_instances_with_expiration` - Indicates whether running Spot
- instances should be terminated when the Spot fleet request expires.
-* `valid_until` - The end date and time of the request, in UTC ISO8601 format
- (for example, YYYY-MM-DDTHH:MM:SSZ). At this point, no new Spot instance
-requests are placed or enabled to fulfill the request. Defaults to 24 hours.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Spot fleet request ID
-* `spot_request_state` - The state of the Spot fleet request.
diff --git a/website/source/docs/providers/aws/r/spot_instance_request.html.markdown b/website/source/docs/providers/aws/r/spot_instance_request.html.markdown
deleted file mode 100644
index 52cbb847f..000000000
--- a/website/source/docs/providers/aws/r/spot_instance_request.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_spot_instance_request"
-sidebar_current: "docs-aws-resource-spot-instance-request"
-description: |-
- Provides a Spot Instance Request resource.
----
-
-# aws\_spot\_instance\_request
-
-Provides an EC2 Spot Instance Request resource. This allows instances to be
-requested on the spot market.
-
-Terraform always creates Spot Instance Requests with a `persistent` type, which
-means that for the duration of their lifetime, AWS will launch an instance
-with the configured details if and when the spot market will accept the
-requested price.
-
-On destruction, Terraform will make an attempt to terminate the associated Spot
-Instance if there is one present.
-
-~> **NOTE:** Because their behavior depends on the live status of the spot
-market, Spot Instance Requests have a unique lifecycle that makes them behave
-differently than other Terraform resources. Most importantly: there is __no
-guarantee__ that a Spot Instance exists to fulfill the request at any given
-point in time. See the [AWS Spot Instance
-documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-spot-instances.html)
-for more information.
-
-
-## Example Usage
-
-```hcl
-# Request a spot instance at $0.03
-resource "aws_spot_instance_request" "cheap_worker" {
- ami = "ami-1234"
- spot_price = "0.03"
- instance_type = "c4.xlarge"
-
- tags {
- Name = "CheapWorker"
- }
-}
-```
-
-## Argument Reference
-
-Spot Instance Requests support all the same arguments as
-[`aws_instance`](instance.html), with the addition of:
-
-* `spot_price` - (Required) The price to request on the spot market.
-* `wait_for_fulfillment` - (Optional; Default: false) If set, Terraform will
- wait for the Spot Request to be fulfilled, and will throw an error if the
- timeout of 10m is reached.
-* `spot_type` - (Optional; Default: "persistent") If set to "one-time", after
- the instance is terminated, the spot request will be closed. Also, Terraform
- can't manage one-time spot requests, just launch them.
-* `block_duration_minutes` - (Optional) The required duration for the Spot instances, in minutes. This value must be a multiple of 60 (60, 120, 180, 240, 300, or 360).
- The duration period starts as soon as your Spot instance receives its instance ID. At the end of the duration period, Amazon EC2 marks the Spot instance for termination and provides a Spot instance termination notice, which gives the instance a two-minute warning before it terminates.
- Note that you can't specify an Availability Zone group or a launch group if you specify a duration.
-
-### Timeouts
-
-The `timeouts` block allows you to specify [timeouts](https://www.terraform.io/docs/configuration/resources.html#timeouts) for certain actions:
-
-* `create` - (Defaults to 10 mins) Used when requesting the spot instance (only valid if `wait_for_fulfillment = true`)
-* `delete` - (Defaults to 10 mins) Used when terminating all instances launched via the given spot instance request
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Spot Instance Request ID.
-
-These attributes are exported, but they are expected to change over time and so
-should only be used for informational purposes, not for resource dependencies:
-
-* `spot_bid_status` - The current [bid
- status](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html)
- of the Spot Instance Request.
-* `spot_request_state` The current [request
- state](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html#creating-spot-request-status)
- of the Spot Instance Request.
-* `spot_instance_id` - The Instance ID (if any) that is currently fulfilling
- the Spot Instance request.
-* `public_dns` - The public DNS name assigned to the instance. For EC2-VPC, this
- is only available if you've enabled DNS hostnames for your VPC
-* `public_ip` - The public IP address assigned to the instance, if applicable.
-* `private_dns` - The private DNS name assigned to the instance. Can only be
- used inside the Amazon EC2, and only available if you've enabled DNS hostnames
- for your VPC
-* `private_ip` - The private IP address assigned to the instance
diff --git a/website/source/docs/providers/aws/r/sqs_queue.html.markdown b/website/source/docs/providers/aws/r/sqs_queue.html.markdown
deleted file mode 100644
index fbe384927..000000000
--- a/website/source/docs/providers/aws/r/sqs_queue.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_sqs_queue"
-sidebar_current: "docs-aws-resource-sqs-queue"
-description: |-
- Provides a SQS resource.
----
-
-# aws\_sqs\_queue
-
-## Example Usage
-
-```hcl
-resource "aws_sqs_queue" "terraform_queue" {
- name = "terraform-example-queue"
- delay_seconds = 90
- max_message_size = 2048
- message_retention_seconds = 86400
- receive_wait_time_seconds = 10
- redrive_policy = "{\"deadLetterTargetArn\":\"${aws_sqs_queue.terraform_queue_deadletter.arn}\",\"maxReceiveCount\":4}"
-}
-```
-
-## FIFO queue
-
-```hcl
-resource "aws_sqs_queue" "terraform_queue" {
- name = "terraform-example-queue.fifo"
- fifo_queue = true
- content_based_deduplication = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) This is the human-readable name of the queue
-* `visibility_timeout_seconds` - (Optional) The visibility timeout for the queue. An integer from 0 to 43200 (12 hours). The default for this attribute is 30. For more information about visibility timeout, see [AWS docs](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/AboutVT.html).
-* `message_retention_seconds` - (Optional) The number of seconds Amazon SQS retains a message. Integer representing seconds, from 60 (1 minute) to 1209600 (14 days). The default for this attribute is 345600 (4 days).
-* `max_message_size` - (Optional) The limit of how many bytes a message can contain before Amazon SQS rejects it. An integer from 1024 bytes (1 KiB) up to 262144 bytes (256 KiB). The default for this attribute is 262144 (256 KiB).
-* `delay_seconds` - (Optional) The time in seconds that the delivery of all messages in the queue will be delayed. An integer from 0 to 900 (15 minutes). The default for this attribute is 0 seconds.
-* `receive_wait_time_seconds` - (Optional) The time for which a ReceiveMessage call will wait for a message to arrive (long polling) before returning. An integer from 0 to 20 (seconds). The default for this attribute is 0, meaning that the call will return immediately.
-* `policy` - (Optional) The JSON policy for the SQS queue
-* `redrive_policy` - (Optional) The JSON policy to set up the Dead Letter Queue, see [AWS docs](https://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/SQSDeadLetterQueue.html). **Note:** when specifying `maxReceiveCount`, you must specify it as an integer (`5`), and not a string (`"5"`).
-* `fifo_queue` - (Optional) Boolean designating a FIFO queue. If not set, it defaults to `false` making it standard.
-* `content_based_deduplication` - (Optional) Enables content-based deduplication for FIFO queues. For more information, see the [related documentation](http://docs.aws.amazon.com/AWSSimpleQueueService/latest/SQSDeveloperGuide/FIFO-queues.html#FIFO-queues-exactly-once-processing)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The URL for the created Amazon SQS queue.
-* `arn` - The ARN of the SQS queue
-
-## Import
-
-SQS Queues can be imported using the `queue url`, e.g.
-
-```
-$ terraform import aws_sqs_queue.public_queue https://queue.amazonaws.com/80398EXAMPLE/MyQueue
-```
diff --git a/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown b/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown
deleted file mode 100644
index ecb0ccb28..000000000
--- a/website/source/docs/providers/aws/r/sqs_queue_policy.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_sqs_queue_policy"
-sidebar_current: "docs-aws-resource-sqs-queue-policy"
-description: |-
- Provides a SQS Queue Policy resource.
----
-
-# aws\_sqs\_queue\_policy
-
-Allows you to set a policy of an SQS Queue
-while referencing ARN of the queue within the policy.
-
-## Example Usage
-
-```hcl
-resource "aws_sqs_queue" "q" {
- name = "examplequeue"
-}
-
-resource "aws_sqs_queue_policy" "test" {
- queue_url = "${aws_sqs_queue.q.id}"
-
- policy = < **NOTE on updating SSM documents:** Only documents with a schema version of 2.0
-or greater can update their content once created, see [SSM Schema Features][1]. To update a document with an older
-schema version you must recreate the resource.
-
-## Example Usage
-
-```hcl
-resource "aws_ssm_document" "foo" {
- name = "test_document"
- document_type = "Command"
-
- content = < **Note:** The unencrypted value of a SecureString will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the parameter.
-* `type` - (Required) The type of the parameter. Valid types are `String`, `StringList` and `SecureString`.
-* `value` - (Required) The value of the parameter.
-* `key_id` - (Optional) The KMS key id or arn for encrypting a SecureString.
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - (Required) The name of the parameter.
-* `type` - (Required) The type of the parameter. Valid types are `String`, `StringList` and `SecureString`.
-* `value` - (Required) The value of the parameter.
diff --git a/website/source/docs/providers/aws/r/ssm_patch_baseline.html.markdown b/website/source/docs/providers/aws/r/ssm_patch_baseline.html.markdown
deleted file mode 100644
index 48e224139..000000000
--- a/website/source/docs/providers/aws/r/ssm_patch_baseline.html.markdown
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ssm_patch_baseline"
-sidebar_current: "docs-aws-resource-ssm-patch-baseline"
-description: |-
- Provides an SSM Patch Baseline resource
----
-
-# aws_ssm_patch_baseline
-
-Provides an SSM Patch Baseline resource
-
-~> **NOTE on Patch Baselines:** The `approved_patches` and `approval_rule` are
-both marked as optional fields, but the Patch Baseline requires that at least one
-of them is specified.
-
-## Example Usage
-
-Basic usage using `approved_patches` only
-
-```hcl
-resource "aws_ssm_patch_baseline" "production" {
- name = "patch-baseline"
- approved_patches = ["KB123456"]
-}
-```
-
-Advanced usage, specifying patch filters
-
-```hcl
-resource "aws_ssm_patch_baseline" "production" {
- name = "patch-baseline"
- description = "Patch Baseline Description"
- approved_patches = ["KB123456", "KB456789"]
- rejected_patches = ["KB987654"]
- global_filter {
- key = "PRODUCT"
- values = ["WindowsServer2008"]
- }
- global_filter {
- key = "CLASSIFICATION"
- values = ["ServicePacks"]
- }
- global_filter {
- key = "MSRC_SEVERITY"
- values = ["Low"]
- }
- approval_rule {
- approve_after_days = 7
- patch_filter {
- key = "PRODUCT"
- values = ["WindowsServer2016"]
- }
- patch_filter {
- key = "CLASSIFICATION"
- values = ["CriticalUpdates", "SecurityUpdates", "Updates"]
- }
- patch_filter {
- key = "MSRC_SEVERITY"
- values = ["Critical", "Important", "Moderate"]
- }
- }
- approval_rule {
- approve_after_days = 7
- patch_filter {
- key = "PRODUCT"
- values = ["WindowsServer2012"]
- }
- }
-}
-```
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the patch baseline.
-* `description` - (Optional) The description of the patch baseline.
-* `approved_patches` - (Optional) A list of explicitly approved patches for the baseline.
-* `rejected_patches` - (Optional) A list of rejected patches.
-* `global_filter` - (Optional) A set of global filters used to exclude patches from the baseline. Up to 4 global filters can be specified using Key/Value pairs. Valid Keys are `PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`.
-* `approval_rule` - (Optional) A set of rules used to include patches in the baseline. up to 10 approval rules can be specified. Each approval_rule block requires the fields documented below.
-
-The `approval_rule` block supports:
-
-* `approve_after_days` - (Required) The number of days after the release date of each patch matched by the rule the patch is marked as approved in the patch baseline. Valid Range: 0 to 100.
-* `patch_filter` - (Required) The patch filter group that defines the criteria for the rule. Up to 4 patch filters can be specified per approval rule using Key/Value pairs. Valid Keys are `PRODUCT | CLASSIFICATION | MSRC_SEVERITY | PATCH_ID`.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the patch baseline.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/ssm_patch_group.html.markdown b/website/source/docs/providers/aws/r/ssm_patch_group.html.markdown
deleted file mode 100644
index c52fe3e9a..000000000
--- a/website/source/docs/providers/aws/r/ssm_patch_group.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_ssm_patch_group"
-sidebar_current: "docs-aws-resource-ssm-patch-group"
-description: |-
- Provides an SSM Patch Group resource
----
-
-# aws_ssm_patch_group
-
-Provides an SSM Patch Group resource
-
-## Example Usage
-
-```hcl
-resource "aws_ssm_patch_baseline" "production" {
- name = "patch-baseline"
- approved_patches = ["KB123456"]
-}
-
-resource "aws_ssm_patch_group" "patchgroup" {
- baseline_id = "${aws_ssm_patch_baseline.production.id}"
- patch_group = "patch-group-name"
-}```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `baseline_id` - (Required) The ID of the patch baseline to register the patch group with.
-* `patch_group` - (Required) The name of the patch group that should be registered with the patch baseline.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the patch baseline.
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/subnet.html.markdown b/website/source/docs/providers/aws/r/subnet.html.markdown
deleted file mode 100644
index 7ff7ed900..000000000
--- a/website/source/docs/providers/aws/r/subnet.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_subnet"
-sidebar_current: "docs-aws-resource-subnet"
-description: |-
- Provides a VPC subnet resource.
----
-
-# aws\_subnet
-
-Provides a VPC subnet resource.
-
-## Example Usage
-
-```hcl
-resource "aws_subnet" "main" {
- vpc_id = "${aws_vpc.main.id}"
- cidr_block = "10.0.1.0/24"
-
- tags {
- Name = "Main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `availability_zone`- (Optional) The AZ for the subnet.
-* `cidr_block` - (Required) The CIDR block for the subnet.
-* `ipv6_cidr_block` - (Optional) The IPv6 network range for the subnet,
- in CIDR notation. The subnet size must use a /64 prefix length.
-* `map_public_ip_on_launch` - (Optional) Specify true to indicate
- that instances launched into the subnet should be assigned
- a public IP address. Default is `false`.
-* `assign_ipv6_address_on_creation` - (Optional) Specify true to indicate
- that network interfaces created in the specified subnet should be
- assigned an IPv6 address. Default is `false`
-* `vpc_id` - (Required) The VPC ID.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the subnet
-* `availability_zone`- The AZ for the subnet.
-* `cidr_block` - The CIDR block for the subnet.
-* `vpc_id` - The VPC ID.
-* `ipv6_association_id` - The association ID for the IPv6 CIDR block.
-* `ipv6_cidr_block` - The IPv6 CIDR block.
-
-## Import
-
-Subnets can be imported using the `subnet id`, e.g.
-
-```
-$ terraform import aws_subnet.public_subnet subnet-9d4a7b6c
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/volume_attachment.html.markdown b/website/source/docs/providers/aws/r/volume_attachment.html.markdown
deleted file mode 100644
index 75cc933c9..000000000
--- a/website/source/docs/providers/aws/r/volume_attachment.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_volume_attachment"
-sidebar_current: "docs-aws-resource-volume-attachment"
-description: |-
- Provides an AWS EBS Volume Attachment
----
-
-# aws\_volume\_attachment
-
-Provides an AWS EBS Volume Attachment as a top level resource, to attach and
-detach volumes from AWS Instances.
-
-~> **NOTE on EBS block devices:** If you use `ebs_block_device` on an `aws_instance`, Terraform will assume management over the full set of non-root EBS block devices for the instance, and treats additional block devices as drift. For this reason, `ebs_block_device` cannot be mixed with external `aws_ebs_volume` + `aws_ebs_volume_attachment` resources for a given instance.
-
-## Example Usage
-
-```hcl
-resource "aws_volume_attachment" "ebs_att" {
- device_name = "/dev/sdh"
- volume_id = "${aws_ebs_volume.example.id}"
- instance_id = "${aws_instance.web.id}"
-}
-
-resource "aws_instance" "web" {
- ami = "ami-21f78e11"
- availability_zone = "us-west-2a"
- instance_type = "t1.micro"
-
- tags {
- Name = "HelloWorld"
- }
-}
-
-resource "aws_ebs_volume" "example" {
- availability_zone = "us-west-2a"
- size = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `device_name` - (Required) The device name to expose to the instance (for
-example, `/dev/sdh` or `xvdh`)
-* `instance_id` - (Required) ID of the Instance to attach to
-* `volume_id` - (Required) ID of the Volume to be attached
-* `force_detach` - (Optional, Boolean) Set to `true` if you want to force the
-volume to detach. Useful if previous attempts failed, but use this option only
-as a last resort, as this can result in **data loss**. See
-[Detaching an Amazon EBS Volume from an Instance][1] for more information.
-* `skip_destroy` - (Optional, Boolean) Set this to true if you do not wish
-to detach the volume from the instance to which it is attached at destroy
-time, and instead just remove the attachment from Terraform state. This is
-useful when destroying an instance which has volumes created by some other
-means attached.
-
-## Attributes Reference
-
-* `device_name` - The device name exposed to the instance
-* `instance_id` - ID of the Instance
-* `volume_id` - ID of the Volume
-
-[1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-detaching-volume.html
diff --git a/website/source/docs/providers/aws/r/vpc.html.markdown b/website/source/docs/providers/aws/r/vpc.html.markdown
deleted file mode 100644
index 115da9d23..000000000
--- a/website/source/docs/providers/aws/r/vpc.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc"
-sidebar_current: "docs-aws-resource-vpc"
-description: |-
- Provides a VPC resource.
----
-
-# aws\_vpc
-
-Provides a VPC resource.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "aws_vpc" "main" {
- cidr_block = "10.0.0.0/16"
-}
-```
-
-Basic usage with tags:
-
-```hcl
-resource "aws_vpc" "main" {
- cidr_block = "10.0.0.0/16"
- instance_tenancy = "dedicated"
-
- tags {
- Name = "main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cidr_block` - (Required) The CIDR block for the VPC.
-* `instance_tenancy` - (Optional) A tenancy option for instances launched into the VPC
-* `enable_dns_support` - (Optional) A boolean flag to enable/disable DNS support in the VPC. Defaults to true.
-* `enable_dns_hostnames` - (Optional) A boolean flag to enable/disable DNS hostnames in the VPC. Defaults to false.
-* `enable_classiclink` - (Optional) A boolean flag to enable/disable ClassicLink
- for the VPC. Only valid in regions and accounts that support EC2 Classic.
- See the [ClassicLink documentation][1] for more information. Defaults false.
-* `assign_generated_ipv6_cidr_block` - (Optional) Requests an Amazon-provided IPv6 CIDR
-block with a /56 prefix length for the VPC. You cannot specify the range of IP addresses, or
-the size of the CIDR block. Default is `false`.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC
-* `cidr_block` - The CIDR block of the VPC
-* `instance_tenancy` - Tenancy of instances spun up within the VPC.
-* `enable_dns_support` - Whether or not the VPC has DNS support
-* `enable_dns_hostnames` - Whether or not the VPC has DNS hostname support
-* `enable_classiclink` - Whether or not the VPC has Classiclink enabled
-* `main_route_table_id` - The ID of the main route table associated with
- this VPC. Note that you can change a VPC's main route table by using an
- [`aws_main_route_table_association`](/docs/providers/aws/r/main_route_table_assoc.html).
-* `default_network_acl_id` - The ID of the network ACL created by default on VPC creation
-* `default_security_group_id` - The ID of the security group created by default on VPC creation
-* `default_route_table_id` - The ID of the route table created by default on VPC creation
-* `ipv6_association_id` - The association ID for the IPv6 CIDR block.
-* `ipv6_cidr_block` - The IPv6 CIDR block.
-
-
-[1]: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html
-
-## Import
-
-VPCs can be imported using the `vpc id`, e.g.
-
-```
-$ terraform import aws_vpc.test_vpc vpc-a01106c2
-```
diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown
deleted file mode 100644
index 34bf01cc8..000000000
--- a/website/source/docs/providers/aws/r/vpc_dhcp_options.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_dhcp_options"
-sidebar_current: "docs-aws-resource-vpc-dhcp-options"
-description: |-
- Provides a VPC DHCP Options resource.
----
-
-# aws\_vpc\_dhcp\_options
-
-Provides a VPC DHCP Options resource.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "aws_vpc_dhcp_options" "dns_resolver" {
- domain_name_servers = ["8.8.8.8", "8.8.4.4"]
-}
-```
-
-Full usage:
-
-```hcl
-resource "aws_vpc_dhcp_options" "foo" {
- domain_name = "service.consul"
- domain_name_servers = ["127.0.0.1", "10.0.0.2"]
- ntp_servers = ["127.0.0.1"]
- netbios_name_servers = ["127.0.0.1"]
- netbios_node_type = 2
-
- tags {
- Name = "foo-name"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain_name` - (Optional) the suffix domain name to use by default when resolving non Fully Qualified Domain Names. In other words, this is what ends up being the `search` value in the `/etc/resolv.conf` file.
-* `domain_name_servers` - (Optional) List of name servers to configure in `/etc/resolv.conf`.
-* `ntp_servers` - (Optional) List of NTP servers to configure.
-* `netbios_name_servers` - (Optional) List of NETBIOS name servers.
-* `netbios_node_type` - (Optional) The NetBIOS node type (1, 2, 4, or 8). AWS recommends to specify 2 since broadcast and multicast are not supported in their network. For more information about these node types, see [RFC 2132](http://www.ietf.org/rfc/rfc2132.txt).
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Remarks
-* Notice that all arguments are optional but you have to specify at least one argument.
-* `domain_name_servers`, `netbios_name_servers`, `ntp_servers` are limited by AWS to maximum four servers only.
-* To actually use the DHCP Options Set you need to associate it to a VPC using [`aws_vpc_dhcp_options_association`](/docs/providers/aws/r/vpc_dhcp_options_association.html).
-* If you delete a DHCP Options Set, all VPCs using it will be associated to AWS's `default` DHCP Option Set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the DHCP Options Set.
-
-You can find more technical documentation about DHCP Options Set in the
-official [AWS User Guide](https://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_DHCP_Options.html).
-
-
-## Import
-
-VPC DHCP Options can be imported using the `dhcp options id`, e.g.
-
-```
-$ terraform import aws_vpc_dhcp_options.my_options dopt-d9070ebb
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown b/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown
deleted file mode 100644
index 2c55ecf10..000000000
--- a/website/source/docs/providers/aws/r/vpc_dhcp_options_association.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_dhcp_options_association"
-sidebar_current: "docs-aws-resource-vpc-dhcp-options-association"
-description: |-
- Provides a VPC DHCP Options Association resource.
----
-
-# aws\_vpc\_dhcp\_options\_association
-
-Provides a VPC DHCP Options Association resource.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc_dhcp_options_association" "dns_resolver" {
- vpc_id = "${aws_vpc.foo.id}"
- dhcp_options_id = "${aws_vpc_dhcp_options.foo.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the VPC to which we would like to associate a DHCP Options Set.
-* `dhcp_options_id` - (Required) The ID of the DHCP Options Set to associate to the VPC.
-
-## Remarks
-* You can only associate one DHCP Options Set to a given VPC ID.
-* Removing the DHCP Options Association automatically sets AWS's `default` DHCP Options Set to the VPC.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the DHCP Options Set Association.
diff --git a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown
deleted file mode 100644
index 4195ecc45..000000000
--- a/website/source/docs/providers/aws/r/vpc_endpoint.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_endpoint"
-sidebar_current: "docs-aws-resource-vpc-endpoint"
-description: |-
- Provides a VPC Endpoint resource.
----
-
-# aws\_vpc\_endpoint
-
-Provides a VPC Endpoint resource.
-
-~> **NOTE on VPC Endpoints and VPC Endpoint Route Table Associations:** Terraform provides
-both a standalone [VPC Endpoint Route Table Association](vpc_endpoint_route_table_association.html)
-(an association between a VPC endpoint and a single `route_table_id`) and a VPC Endpoint resource
-with a `route_table_ids` attribute. Do not use the same route table ID in both a VPC Endpoint resource
-and a VPC Endpoint Route Table Association resource. Doing so will cause a conflict of associations
-and will overwrite the association.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "aws_vpc_endpoint" "private-s3" {
- vpc_id = "${aws_vpc.main.id}"
- service_name = "com.amazonaws.us-west-2.s3"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the VPC in which the endpoint will be used.
-* `service_name` - (Required) The AWS service name, in the form `com.amazonaws.region.service`.
-* `policy` - (Optional) A policy to attach to the endpoint that controls access to the service.
-* `route_table_ids` - (Optional) One or more route table IDs.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC endpoint.
-* `prefix_list_id` - The prefix list ID of the exposed service.
-* `cidr_blocks` - The list of CIDR blocks for the exposed service.
-
-## Import
-
-VPC Endpoints can be imported using the `vpc endpoint id`, e.g.
-
-```
-$ terraform import aws_vpc_endpoint.endpoint1 vpce-3ecf2a57
-```
diff --git a/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown b/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown
deleted file mode 100644
index 61975fe62..000000000
--- a/website/source/docs/providers/aws/r/vpc_endpoint_route_table_association.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_endpoint_route_table_association"
-sidebar_current: "docs-aws-resource-vpc-endpoint-route-table-association"
-description: |-
- Provides a resource to create an association between a VPC endpoint and routing table.
----
-
-# aws\_vpc\_endpoint\_route\_table\_association
-
-Provides a resource to create an association between a VPC endpoint and routing table.
-
-~> **NOTE on VPC Endpoints and VPC Endpoint Route Table Associations:** Terraform provides
-both a standalone VPC Endpoint Route Table Association (an association between a VPC endpoint
-and a single `route_table_id`) and a [VPC Endpoint](vpc_endpoint.html) resource with a `route_table_ids`
-attribute. Do not use the same route table ID in both a VPC Endpoint resource and a VPC Endpoint Route
-Table Association resource. Doing so will cause a conflict of associations and will overwrite the association.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "aws_vpc_endpoint_route_table_association" "private_s3" {
- vpc_endpoint_id = "${aws_vpc_endpoint.s3.id}"
- route_table_id = "${aws_route_table.private.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_endpoint_id` - (Required) The ID of the VPC endpoint with which the routing table will be associated.
-* `route_table_id` - (Required) The ID of the routing table to be associated with the VPC endpoint.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the association.
diff --git a/website/source/docs/providers/aws/r/vpc_peering.html.markdown b/website/source/docs/providers/aws/r/vpc_peering.html.markdown
deleted file mode 100644
index cc7de2630..000000000
--- a/website/source/docs/providers/aws/r/vpc_peering.html.markdown
+++ /dev/null
@@ -1,130 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_peering_connection"
-sidebar_current: "docs-aws-resource-vpc-peering"
-description: |-
- Manage a VPC Peering Connection resource.
----
-
-# aws\_vpc\_peering\_connection
-
-Provides a resource to manage a VPC Peering Connection.
-
--> **Note:** For cross-account (requester's AWS account differs from the accepter's AWS account) VPC Peering Connections
-use the `aws_vpc_peering_connection` resource to manage the requester's side of the connection and
-use the `aws_vpc_peering_connection_accepter` resource to manage the accepter's side of the connection.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc_peering_connection" "foo" {
- peer_owner_id = "${var.peer_owner_id}"
- peer_vpc_id = "${aws_vpc.bar.id}"
- vpc_id = "${aws_vpc.foo.id}"
-}
-```
-
-Basic usage with connection options:
-
-```hcl
-resource "aws_vpc_peering_connection" "foo" {
- peer_owner_id = "${var.peer_owner_id}"
- peer_vpc_id = "${aws_vpc.bar.id}"
- vpc_id = "${aws_vpc.foo.id}"
-
- accepter {
- allow_remote_vpc_dns_resolution = true
- }
-
- requester {
- allow_remote_vpc_dns_resolution = true
- }
-}
-```
-
-Basic usage with tags:
-
-```hcl
-resource "aws_vpc_peering_connection" "foo" {
- peer_owner_id = "${var.peer_owner_id}"
- peer_vpc_id = "${aws_vpc.bar.id}"
- vpc_id = "${aws_vpc.foo.id}"
- auto_accept = true
-
- tags {
- Name = "VPC Peering between foo and bar"
- }
-}
-
-resource "aws_vpc" "foo" {
- cidr_block = "10.1.0.0/16"
-}
-
-resource "aws_vpc" "bar" {
- cidr_block = "10.2.0.0/16"
-}
-```
-
-## Argument Reference
-
--> **Note:** Modifying the VPC Peering Connection options requires peering to be active. An automatic activation
-can be done using the [`auto_accept`](vpc_peering.html#auto_accept) attribute. Alternatively, the VPC Peering
-Connection has to be made active manually using other means. See [notes](vpc_peering.html#notes) below for
-more information.
-
-The following arguments are supported:
-
-* `peer_owner_id` - (Required) The AWS account ID of the owner of the peer VPC.
- Defaults to the account ID the [AWS provider][1] is currently connected to.
-* `peer_vpc_id` - (Required) The ID of the VPC with which you are creating the VPC Peering Connection.
-* `vpc_id` - (Required) The ID of the requester VPC.
-* `auto_accept` - (Optional) Accept the peering (both VPCs need to be in the same AWS account).
-* `accepter` (Optional) - An optional configuration block that allows for [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that accepts
-the peering connection (a maximum of one).
-* `requester` (Optional) - An optional configuration block that allows for [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options to be set for the VPC that requests
-the peering connection (a maximum of one).
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-#### Accepter and Requester Arguments
-
--> **Note:** When enabled, the DNS resolution feature requires that VPCs participating in the peering
-must have support for the DNS hostnames enabled. This can be done using the [`enable_dns_hostnames`]
-(vpc.html#enable_dns_hostnames) attribute in the [`aws_vpc`](vpc.html) resource. See [Using DNS with Your VPC]
-(http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/vpc-dns.html) user guide for more information.
-
-* `allow_remote_vpc_dns_resolution` - (Optional) Allow a local VPC to resolve public DNS hostnames to private
-IP addresses when queried from instances in the peer VPC.
-* `allow_classic_link_to_remote_vpc` - (Optional) Allow a local linked EC2-Classic instance to communicate
-with instances in a peer VPC. This enables an outbound communication from the local ClassicLink connection
-to the remote VPC.
-* `allow_vpc_to_remote_classic_link` - (Optional) Allow a local VPC to communicate with a linked EC2-Classic
-instance in a peer VPC. This enables an outbound communication from the local VPC to the remote ClassicLink
-connection.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC Peering Connection.
-* `accept_status` - The status of the VPC Peering Connection request.
-
-
-## Notes
-
-AWS only supports VPC peering within the same AWS region.
-
-If both VPCs are not in the same AWS account do not enable the `auto_accept` attribute.
-The accepter can manage its side of the connection using the `aws_vpc_peering_connection_accepter` resource
-or accept the connection manually using the AWS Management Console, AWS CLI, through SDKs, etc.
-
-## Import
-
-VPC Peering resources can be imported using the `vpc peering id`, e.g.
-
-```
-$ terraform import aws_vpc_peering_connection.test_connection pcx-111aaa111
-```
-
-[1]: /docs/providers/aws/index.html
diff --git a/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown b/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown
deleted file mode 100644
index c330226b3..000000000
--- a/website/source/docs/providers/aws/r/vpc_peering_accepter.html.markdown
+++ /dev/null
@@ -1,106 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpc_peering_connection_accepter"
-sidebar_current: "docs-aws-resource-vpc-peering-accepter"
-description: |-
- Manage the accepter's side of a cross-account VPC Peering Connection.
----
-
-# aws\_vpc\_peering\_connection\_accepter
-
-Provides a resource to manage the accepter's side of a cross-account VPC Peering Connection.
-
-When a cross-account (requester's AWS account differs from the accepter's AWS account) VPC Peering Connection
-is created, a VPC Peering Connection resource is automatically created in the accepter's account.
-The requester can use the `aws_vpc_peering_connection` resource to manage its side of the connection
-and the accepter can use the `aws_vpc_peering_connection_accepter` resource to "adopt" its side of the
-connection into management.
-
-## Example Usage
-
-```hcl
-provider "aws" {
- // Requester's credentials.
-}
-
-provider "aws" {
- alias = "peer"
-
- // Accepter's credentials.
-}
-
-resource "aws_vpc" "main" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_vpc" "peer" {
- provider = "aws.peer"
- cidr_block = "10.1.0.0/16"
-}
-
-data "aws_caller_identity" "peer" {
- provider = "aws.peer"
-}
-
-// Requester's side of the connection.
-resource "aws_vpc_peering_connection" "peer" {
- vpc_id = "${aws_vpc.main.id}"
- peer_vpc_id = "${aws_vpc.peer.id}"
- peer_owner_id = "${data.aws_caller_identity.peer.account_id}"
- auto_accept = false
-
- tags {
- Side = "Requester"
- }
-}
-
-// Accepter's side of the connection.
-resource "aws_vpc_peering_connection_accepter" "peer" {
- provider = "aws.peer"
- vpc_peering_connection_id = "${aws_vpc_peering_connection.peer.id}"
- auto_accept = true
-
- tags {
- Side = "Accepter"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_peering_connection_id` - (Required) The VPC Peering Connection ID to manage.
-* `auto_accept` - (Optional) Whether or not to accept the peering request. Defaults to `false`.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-### Removing `aws_vpc_peering_connection_accepter` from your configuration
-
-AWS allows a cross-account VPC Peering Connection to be deleted from either the requester's or accepter's side.
-However, Terraform only allows the VPC Peering Connection to be deleted from the requester's side
-by removing the corresponding `aws_vpc_peering_connection` resource from your configuration.
-Removing a `aws_vpc_peering_connection_accepter` resource from your configuration will remove it
-from your statefile and management, **but will not destroy the VPC Peering Connection.**
-
-## Attributes Reference
-
-All of the argument attributes except `auto_accept` are also exported as result attributes.
-
-* `id` - The ID of the VPC Peering Connection.
-* `accept_status` - The status of the VPC Peering Connection request.
-* `vpc_id` - The ID of the accepter VPC.
-* `peer_vpc_id` - The ID of the requester VPC.
-* `peer_owner_id` - The AWS account ID of the owner of the requester VPC.
-* `accepter` - A configuration block that describes [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the accepter VPC.
-* `requester` - A configuration block that describes [VPC Peering Connection]
-(http://docs.aws.amazon.com/AmazonVPC/latest/PeeringGuide) options set for the requester VPC.
-
-#### Accepter and Requester Attributes Reference
-
-* `allow_remote_vpc_dns_resolution` - Indicates whether a local VPC can resolve public DNS hostnames to
-private IP addresses when queried from instances in a peer VPC.
-* `allow_classic_link_to_remote_vpc` - Indicates whether a local ClassicLink connection can communicate
-with the peer VPC over the VPC Peering Connection.
-* `allow_vpc_to_remote_classic_link` - Indicates whether a local VPC can communicate with a ClassicLink
-connection in the peer VPC over the VPC Peering Connection.
diff --git a/website/source/docs/providers/aws/r/vpn_connection.html.markdown b/website/source/docs/providers/aws/r/vpn_connection.html.markdown
deleted file mode 100644
index fbbac508e..000000000
--- a/website/source/docs/providers/aws/r/vpn_connection.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_connection"
-sidebar_current: "docs-aws-resource-vpn-connection"
-description: |-
- Provides a VPN connection connected to a VPC. These objects can be connected to customer gateways, and allow you to establish tunnels between your network and the VPC.
----
-
-# aws\_vpn\_connection
-
-
-Provides a VPN connection connected to a VPC. These objects can be connected to customer gateways, and allow you to establish tunnels between your network and the VPC.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "vpc" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_vpn_gateway" "vpn_gateway" {
- vpc_id = "${aws_vpc.vpc.id}"
-}
-
-resource "aws_customer_gateway" "customer_gateway" {
- bgp_asn = 65000
- ip_address = "172.0.0.1"
- type = "ipsec.1"
-}
-
-resource "aws_vpn_connection" "main" {
- vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}"
- customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}"
- type = "ipsec.1"
- static_routes_only = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `customer_gateway_id` - (Required) The ID of the customer gateway.
-* `static_routes_only` - (Optional, Default `false`) Whether the VPN connection uses static routes exclusively. Static routes must be used for devices that don't support BGP.
-* `tags` - (Optional) Tags to apply to the connection.
-* `type` - (Required) The type of VPN connection. The only type AWS supports at this time is "ipsec.1".
-* `vpn_gateway_id` - (Required) The ID of the virtual private gateway.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - The amazon-assigned ID of the VPN connection.
-* `customer_gateway_configuration` - The configuration information for the VPN connection's customer gateway (in the native XML format).
-* `customer_gateway_id` - The ID of the customer gateway to which the connection is attached.
-* `static_routes_only` - Whether the VPN connection uses static routes exclusively.
-* `tags` - Tags applied to the connection.
-* `tunnel1_address` - The public IP address of the first VPN tunnel.
-* `tunnel1_cgw_inside_address` - The RFC 6890 link-local address of the first VPN tunnel (Customer Gateway Side).
-* `tunnel1_vgw_inside_address` - The RFC 6890 link-local address of the first VPN tunnel (VPN Gateway Side).
-* `tunnel1_preshared_key` - The preshared key of the first VPN tunnel.
-* `tunnel2_address` - The public IP address of the second VPN tunnel.
-* `tunnel2_cgw_inside_address` - The RFC 6890 link-local address of the second VPN tunnel (Customer Gateway Side).
-* `tunnel2_vgw_inside_address` - The RFC 6890 link-local address of the second VPN tunnel (VPN Gateway Side).
-* `tunnel2_preshared_key` - The preshared key of the second VPN tunnel.
-* `type` - The type of VPN connection.
-* `vpn_gateway_id` - The ID of the virtual private gateway to which the connection is attached.
-
-
-## Import
-
-VPN Connections can be imported using the `vpn connection id`, e.g.
-
-```
-$ terraform import aws_vpn_connection.testvpnconnection vpn-40f41529
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown b/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown
deleted file mode 100644
index 9d64e9eb0..000000000
--- a/website/source/docs/providers/aws/r/vpn_connection_route.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_connection_route"
-sidebar_current: "docs-aws-resource-vpn-connection-route"
-description: |-
- Provides a static route between a VPN connection and a customer gateway.
----
-
-# aws\_vpn\_connection\_route
-
-Provides a static route between a VPN connection and a customer gateway.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "vpc" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_vpn_gateway" "vpn_gateway" {
- vpc_id = "${aws_vpc.vpc.id}"
-}
-
-resource "aws_customer_gateway" "customer_gateway" {
- bgp_asn = 65000
- ip_address = "172.0.0.1"
- type = "ipsec.1"
-}
-
-resource "aws_vpn_connection" "main" {
- vpn_gateway_id = "${aws_vpn_gateway.vpn_gateway.id}"
- customer_gateway_id = "${aws_customer_gateway.customer_gateway.id}"
- type = "ipsec.1"
- static_routes_only = true
-}
-
-resource "aws_vpn_connection_route" "office" {
- destination_cidr_block = "192.168.10.0/24"
- vpn_connection_id = "${aws_vpn_connection.main.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `destination_cidr_block` - (Required) The CIDR block associated with the local subnet of the customer network.
-* `vpn_connection_id` - (Required) The ID of the VPN connection.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `destination_cidr_block` - The CIDR block associated with the local subnet of the customer network.
-* `vpn_connection_id` - The ID of the VPN connection.
diff --git a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway.html.markdown
deleted file mode 100644
index d4b391b91..000000000
--- a/website/source/docs/providers/aws/r/vpn_gateway.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_gateway"
-sidebar_current: "docs-aws-resource-vpn-gateway-x"
-description: |-
- Provides a resource to create a VPC VPN Gateway.
----
-
-# aws\_vpn\_gateway
-
-Provides a resource to create a VPC VPN Gateway.
-
-## Example Usage
-
-```hcl
-resource "aws_vpn_gateway" "vpn_gw" {
- vpc_id = "${aws_vpc.main.id}"
-
- tags {
- Name = "main"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Optional) The VPC ID to create in.
-* `availability_zone` - (Optional) The Availability Zone for the virtual private gateway.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPN Gateway.
-
-
-## Import
-
-VPN Gateways can be imported using the `vpn gateway id`, e.g.
-
-```
-$ terraform import aws_vpn_gateway.testvpngateway vgw-9a4cacf3
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown
deleted file mode 100644
index 6271598ff..000000000
--- a/website/source/docs/providers/aws/r/vpn_gateway_attachment.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_gateway_attachment"
-sidebar_current: "docs-aws-resource-vpn-gateway-attachment"
-description: |-
- Provides a Virtual Private Gateway attachment resource.
----
-
-# aws\_vpn\_gateway\_attachment
-
-Provides a Virtual Private Gateway attachment resource, allowing for an existing
-hardware VPN gateway to be attached and/or detached from a VPC.
-
--> **Note:** The [`aws_vpn_gateway`](vpn_gateway.html)
-resource can also automatically attach the Virtual Private Gateway it creates
-to an existing VPC by setting the [`vpc_id`](vpn_gateway.html#vpc_id) attribute accordingly.
-
-## Example Usage
-
-```hcl
-resource "aws_vpc" "network" {
- cidr_block = "10.0.0.0/16"
-}
-
-resource "aws_vpn_gateway" "vpn" {
- tags {
- Name = "example-vpn-gateway"
- }
-}
-
-resource "aws_vpn_gateway_attachment" "vpn_attachment" {
- vpc_id = "${aws_vpc.network.id}"
- vpn_gateway_id = "${aws_vpn_gateway.vpn.id}"
-}
-```
-
-See [Virtual Private Cloud](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_Introduction.html)
-and [Virtual Private Gateway](http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_VPN.html) user
-guides for more information.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the VPC.
-* `vpn_gateway_id` - (Required) The ID of the Virtual Private Gateway.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `vpc_id` - The ID of the VPC that Virtual Private Gateway is attached to.
-* `vpn_gateway_id` - The ID of the Virtual Private Gateway.
-
-## Import
-
-This resource does not support importing.
diff --git a/website/source/docs/providers/aws/r/vpn_gateway_route_propagation.html.markdown b/website/source/docs/providers/aws/r/vpn_gateway_route_propagation.html.markdown
deleted file mode 100644
index d72940e30..000000000
--- a/website/source/docs/providers/aws/r/vpn_gateway_route_propagation.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_vpn_gateway_route_propagation"
-sidebar_current: "docs-aws-resource-vpn-gateway-route-propagation"
-description: |-
- Requests automatic route propagation between a VPN gateway and a route table.
----
-
-# aws_vpn_gateway_route_propagation
-
-Requests automatic route propagation between a VPN gateway and a route table.
-
-~> **Note:** This resource should not be used with a route table that has
-the `propagating_vgws` argument set. If that argument is set, any route
-propagation not explicitly listed in its value will be removed.
-
-## Example Usage
-
-```hcl
-resource "aws_vpn_gateway_route_propagation" "example" {
- vpn_gateway_id = "${aws_vpn_gateway.example.id}"
- route_table_id = "${aws_route_table.example.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are required:
-
-* `vpn_gateway_id` - The id of the `aws_vpn_gateway` to propagate routes from.
-* `route_table_id` - The id of the `aws_route_table` to propagate routes into.
-
-## Attributes Reference
-
-This resource does not export any additional attributes.
diff --git a/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown
deleted file mode 100644
index 4c9781bce..000000000
--- a/website/source/docs/providers/aws/r/waf_byte_match_set.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_byte_match_set"
-sidebar_current: "docs-aws-resource-waf-bytematchset"
-description: |-
- Provides a AWS WAF Byte Match Set resource.
----
-
-# aws\_waf\_byte\_match\_set
-
-Provides a WAF Byte Match Set Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_byte_match_set" "byte_set" {
- name = "tf_waf_byte_match_set"
-
- byte_match_tuples {
- text_transformation = "NONE"
- target_string = "badrefer1"
- positional_constraint = "CONTAINS"
-
- field_to_match {
- type = "HEADER"
- data = "referer"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the Byte Match Set.
-* `byte_match_tuples` - Specifies the bytes (typically a string that corresponds
- with ASCII characters) that you want to search for in web requests,
- the location in requests that you want to search, and other settings.
-
-## Nested blocks
-
-### `byte_match_tuples`
-
-#### Arguments
-
-* `field_to_match` - (Required) The part of a web request that you want to search, such as a specified header or a query string.
-* `positional_constraint` - (Required) Within the portion of a web request that you want to search
- (for example, in the query string, if any), specify where you want to search.
- e.g. `CONTAINS`, `CONTAINS_WORD` or `EXACTLY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_ByteMatchTuple.html#WAF-Type-ByteMatchTuple-PositionalConstraint)
- for all supported values.
-* `target_string` - (Optional) The value that you want to search for. e.g. `HEADER`, `METHOD` or `BODY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_ByteMatchTuple.html#WAF-Type-ByteMatchTuple-TargetString)
- for all supported values.
-* `text_transformation` - (Required) Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
- If you specify a transformation, AWS WAF performs the transformation on `target_string` before inspecting a request for a match.
- e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_ByteMatchTuple.html#WAF-Type-ByteMatchTuple-TextTransformation)
- for all supported values.
-
-### `field_to_match`
-
-#### Arguments
-
-* `data` - (Optional) When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
- If `type` is any other value, omit this field.
-* `type` - (Required) The part of the web request that you want AWS WAF to search for a specified string.
- e.g. `HEADER`, `METHOD` or `BODY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
- for all supported values.
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF Byte Match Set.
diff --git a/website/source/docs/providers/aws/r/waf_ipset.html.markdown b/website/source/docs/providers/aws/r/waf_ipset.html.markdown
deleted file mode 100644
index 058101a4a..000000000
--- a/website/source/docs/providers/aws/r/waf_ipset.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_ipset"
-sidebar_current: "docs-aws-resource-waf-ipset"
-description: |-
- Provides a AWS WAF IPSet resource.
----
-
-# aws\_waf\_ipset
-
-Provides a WAF IPSet Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_ipset" "ipset" {
- name = "tfIPSet"
-
- ip_set_descriptors {
- type = "IPV4"
- value = "192.0.7.0/24"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the IPSet.
-* `ip_set_descriptors` - (Optional) Specifies the IP address type (IPV4 or IPV6)
- and the IP address range (in CIDR format) that web requests originate from.
-
-## Nested Blocks
-
-### `ip_set_descriptors`
-
-#### Arguments
-
-* `type` - (Required) Type of the IP address - `IPV4` or `IPV6`.
-* `value` - (Required) An IPv4 or IPv6 address specified via CIDR notation.
- e.g. `192.0.2.44/32` or `1111:0000:0000:0000:0000:0000:0000:0000/64`
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF IPSet.
diff --git a/website/source/docs/providers/aws/r/waf_rule.html.markdown b/website/source/docs/providers/aws/r/waf_rule.html.markdown
deleted file mode 100644
index feea0c707..000000000
--- a/website/source/docs/providers/aws/r/waf_rule.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_rule"
-sidebar_current: "docs-aws-resource-waf-rule"
-description: |-
- Provides a AWS WAF rule resource.
----
-
-# aws\_waf\_rule
-
-Provides a WAF Rule Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_ipset" "ipset" {
- name = "tfIPSet"
-
- ip_set_descriptors {
- type = "IPV4"
- value = "192.0.7.0/24"
- }
-}
-
-resource "aws_waf_rule" "wafrule" {
- depends_on = ["aws_waf_ipset.ipset"]
- name = "tfWAFRule"
- metric_name = "tfWAFRule"
-
- predicates {
- data_id = "${aws_waf_ipset.ipset.id}"
- negated = false
- type = "IPMatch"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metric_name` - (Required) The name or description for the Amazon CloudWatch metric of this rule.
-* `name` - (Required) The name or description of the rule.
-* `predicates` - (Optional) One of ByteMatchSet, IPSet, SizeConstraintSet, SqlInjectionMatchSet, or XssMatchSet objects to include in a rule.
-
-## Nested Blocks
-
-### `predicates`
-
-#### Arguments
-
-* `negated` - (Required) Set this to `false` if you want to allow, block, or count requests
- based on the settings in the specified `ByteMatchSet`, `IPSet`, `SqlInjectionMatchSet`, `XssMatchSet`, or `SizeConstraintSet`.
- For example, if an IPSet includes the IP address `192.0.2.44`, AWS WAF will allow or block requests based on that IP address.
- If set to `true`, AWS WAF will allow, block, or count requests based on all IP addresses _except_ `192.0.2.44`.
-* `data_id` - (Optional) A unique identifier for a predicate in the rule, such as Byte Match Set ID or IPSet ID.
-* `type` - (Required) The type of predicate in a rule, such as `ByteMatchSet` or `IPSet`
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF rule.
diff --git a/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown b/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown
deleted file mode 100644
index e0d010791..000000000
--- a/website/source/docs/providers/aws/r/waf_size_constraint_set.html.markdown
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_size_constraint_set"
-sidebar_current: "docs-aws-resource-waf-size-constraint-set"
-description: |-
- Provides a AWS WAF Size Constraint Set resource.
----
-
-# aws\_waf\_size\_constraint\_set
-
-Provides a WAF Size Constraint Set Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_size_constraint_set" "size_constraint_set" {
- name = "tfsize_constraints"
-
- size_constraints {
- text_transformation = "NONE"
- comparison_operator = "EQ"
- size = "4096"
-
- field_to_match {
- type = "BODY"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the Size Constraint Set.
-* `size_constraints` - (Optional) Specifies the parts of web requests that you want to inspect the size of.
-
-## Nested Blocks
-
-### `size_constraints`
-
-#### Arguments
-
-* `field_to_match` - (Required) Specifies where in a web request to look for the size constraint.
-* `comparison_operator` - (Required) The type of comparison you want to perform.
- e.g. `EQ`, `NE`, `LT`, `GT`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_SizeConstraint.html#WAF-Type-SizeConstraint-ComparisonOperator) for all supported values.
-* `size` - (Required) The size in bytes that you want to compare against the size of the specified `field_to_match`.
- Valid values are between 0 - 21474836480 bytes (0 - 20 GB).
-* `text_transformation` - (Required) Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
- If you specify a transformation, AWS WAF performs the transformation on `field_to_match` before inspecting a request for a match.
- e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_SizeConstraint.html#WAF-Type-SizeConstraint-TextTransformation)
- for all supported values.
- **Note:** if you choose `BODY` as `type`, you must choose `NONE` because CloudFront forwards only the first 8192 bytes for inspection.
-
-### `field_to_match`
-
-#### Arguments
-
-* `data` - (Optional) When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
- If `type` is any other value, omit this field.
-* `type` - (Required) The part of the web request that you want AWS WAF to search for a specified string.
- e.g. `HEADER`, `METHOD` or `BODY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
- for all supported values.
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF Size Constraint Set.
diff --git a/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown
deleted file mode 100644
index d3c96cf71..000000000
--- a/website/source/docs/providers/aws/r/waf_sql_injection_match_set.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_sql_injection_match_set"
-sidebar_current: "docs-aws-resource-waf-sql-injection-match-set"
-description: |-
- Provides a AWS WAF SQL Injection Match Set resource.
----
-
-# aws\_waf\_sql\_injection\_match\_set
-
-Provides a WAF SQL Injection Match Set Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_sql_injection_match_set" "sql_injection_match_set" {
- name = "tf-sql_injection_match_set"
-
- sql_injection_match_tuples {
- text_transformation = "URL_DECODE"
-
- field_to_match {
- type = "QUERY_STRING"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the SizeConstraintSet.
-* `sql_injection_match_tuples` - (Optional) The parts of web requests that you want AWS WAF to inspect for malicious SQL code and, if you want AWS WAF to inspect a header, the name of the header.
-
-## Nested Blocks
-
-### `sql_injection_match_tuples`
-
-* `field_to_match` - (Required) Specifies where in a web request to look for snippets of malicious SQL code.
-* `text_transformation` - (Required) Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
- If you specify a transformation, AWS WAF performs the transformation on `field_to_match` before inspecting a request for a match.
- e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_SqlInjectionMatchTuple.html#WAF-Type-SqlInjectionMatchTuple-TextTransformation)
- for all supported values.
-
-### `field_to_match`
-
-#### Arguments
-
-* `data` - (Optional) When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
- If `type` is any other value, omit this field.
-* `type` - (Required) The part of the web request that you want AWS WAF to search for a specified string.
- e.g. `HEADER`, `METHOD` or `BODY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
- for all supported values.
-
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF SQL Injection Match Set.
diff --git a/website/source/docs/providers/aws/r/waf_web_acl.html.markdown b/website/source/docs/providers/aws/r/waf_web_acl.html.markdown
deleted file mode 100644
index c8c6a9987..000000000
--- a/website/source/docs/providers/aws/r/waf_web_acl.html.markdown
+++ /dev/null
@@ -1,91 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: aws_waf_web_acl"
-sidebar_current: "docs-aws-resource-waf-webacl"
-description: |-
- Provides a AWS WAF web access control group (ACL) resource.
----
-
-# aws\_waf\_web\_acl
-
-Provides a WAF Web ACL Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_ipset" "ipset" {
- name = "tfIPSet"
-
- ip_set_descriptors {
- type = "IPV4"
- value = "192.0.7.0/24"
- }
-}
-
-resource "aws_waf_rule" "wafrule" {
- depends_on = ["aws_waf_ipset.ipset"]
- name = "tfWAFRule"
- metric_name = "tfWAFRule"
-
- predicates {
- data_id = "${aws_waf_ipset.ipset.id}"
- negated = false
- type = "IPMatch"
- }
-}
-
-resource "aws_waf_web_acl" "waf_acl" {
- depends_on = ["aws_waf_ipset.ipset", "aws_waf_rule.wafrule"]
- name = "tfWebACL"
- metric_name = "tfWebACL"
-
- default_action {
- type = "ALLOW"
- }
-
- rules {
- action {
- type = "BLOCK"
- }
-
- priority = 1
- rule_id = "${aws_waf_rule.wafrule.id}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `default_action` - (Required) The action that you want AWS WAF to take when a request doesn't match the criteria in any of the rules that are associated with the web ACL.
-* `metric_name` - (Required) The name or description for the Amazon CloudWatch metric of this web ACL.
-* `name` - (Required) The name or description of the web ACL.
-* `rules` - (Required) The rules to associate with the web ACL and the settings for each rule.
-
-## Nested Blocks
-
-### `default_action`
-
-#### Arguments
-
-* `type` - (Required) Specifies how you want AWS WAF to respond to requests that match the settings in a rule.
- e.g. `ALLOW`, `BLOCK` or `COUNT`
-
-### `rules`
-
-See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_ActivatedRule.html) for all details and supported values.
-
-#### Arguments
-
-* `action` - (Required) The action that CloudFront or AWS WAF takes when a web request matches the conditions in the rule.
- e.g. `ALLOW`, `BLOCK` or `COUNT`
-* `priority` - (Required) Specifies the order in which the rules in a WebACL are evaluated.
- Rules with a lower value are evaluated before rules with a higher value.
-* `rule_id` - (Required) ID of the associated [rule](/docs/providers/aws/r/waf_rule.html)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF WebACL.
diff --git a/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown b/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown
deleted file mode 100644
index 5f51a1f46..000000000
--- a/website/source/docs/providers/aws/r/waf_xss_match_set.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: waf_xss_match_set"
-sidebar_current: "docs-aws-resource-waf-xss-match-set"
-description: |-
- Provides a AWS WAF XssMatchSet resource.
----
-
-# aws\_waf\_xss\_match\_set
-
-Provides a WAF XSS Match Set Resource
-
-## Example Usage
-
-```hcl
-resource "aws_waf_xss_match_set" "xss_match_set" {
- name = "xss_match_set"
-
- xss_match_tuples {
- text_transformation = "NONE"
-
- field_to_match {
- type = "URI"
- }
- }
-
- xss_match_tuples {
- text_transformation = "NONE"
-
- field_to_match {
- type = "QUERY_STRING"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the SizeConstraintSet.
-* `xss_match_tuples` - (Optional) The parts of web requests that you want to inspect for cross-site scripting attacks.
-
-## Nested Blocks
-
-### `xss_match_tuples`
-
-* `field_to_match` - (Required) Specifies where in a web request to look for cross-site scripting attacks.
-* `text_transformation` - (Required) Text transformations used to eliminate unusual formatting that attackers use in web requests in an effort to bypass AWS WAF.
- If you specify a transformation, AWS WAF performs the transformation on `target_string` before inspecting a request for a match.
- e.g. `CMD_LINE`, `HTML_ENTITY_DECODE` or `NONE`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_XssMatchTuple.html#WAF-Type-XssMatchTuple-TextTransformation)
- for all supported values.
-
-### `field_to_match`
-
-#### Arguments
-
-* `data` - (Optional) When `type` is `HEADER`, enter the name of the header that you want to search, e.g. `User-Agent` or `Referer`.
- If `type` is any other value, omit this field.
-* `type` - (Required) The part of the web request that you want AWS WAF to search for a specified string.
- e.g. `HEADER`, `METHOD` or `BODY`.
- See [docs](http://docs.aws.amazon.com/waf/latest/APIReference/API_FieldToMatch.html)
- for all supported values.
-
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF XssMatchSet.
diff --git a/website/source/docs/providers/aws/r/wafregional_byte_match_set.html.markdown b/website/source/docs/providers/aws/r/wafregional_byte_match_set.html.markdown
deleted file mode 100644
index 1ebd97a48..000000000
--- a/website/source/docs/providers/aws/r/wafregional_byte_match_set.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: wafregional_byte_match_set"
-sidebar_current: "docs-aws-resource-wafregional-bytematchset"
-description: |-
- Provides a AWS WAF Regional ByteMatchSet resource for use with ALB.
----
-
-# aws\_wafregional\_byte\_match\_set
-
-Provides a WAF Regional Byte Match Set Resource for use with Application Load Balancer.
-
-## Example Usage
-
-```
-resource "aws_wafregional_byte_match_set" "byte_set" {
- name = "tf_waf_byte_match_set"
- byte_match_tuple {
- text_transformation = "NONE"
- target_string = "badrefer1"
- positional_constraint = "CONTAINS"
- field_to_match {
- type = "HEADER"
- data = "referer"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the ByteMatchSet.
-* `byte_match_tuple` - (Optional)Settings for the ByteMatchSet, such as the bytes (typically a string that corresponds with ASCII characters) that you want AWS WAF to search for in web requests. ByteMatchTuple documented below.
-
-ByteMatchTuple(byte_match_tuple) support the following:
-
-* `field_to_match` - (Required) Settings for the ByteMatchTuple. FieldToMatch documented below.
-* `positional_constraint` - (Required) Within the portion of a web request that you want to search.
-* `target_string` - (Required) The value that you want AWS WAF to search for. The maximum length of the value is 50 bytes.
-* `text_transformation` - (Required) The formatting way for web request.
-
-FieldToMatch(field_to_match) support following:
-
-* `data` - (Optional) When the value of Type is HEADER, enter the name of the header that you want AWS WAF to search, for example, User-Agent or Referer. If the value of Type is any other value, omit Data.
-* `type` - (Required) The part of the web request that you want AWS WAF to search for a specified string.
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF ByteMatchSet.
diff --git a/website/source/docs/providers/aws/r/wafregional_ipset.html.markdown b/website/source/docs/providers/aws/r/wafregional_ipset.html.markdown
deleted file mode 100644
index 5ad381266..000000000
--- a/website/source/docs/providers/aws/r/wafregional_ipset.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "aws"
-page_title: "AWS: wafregional_ipset"
-sidebar_current: "docs-aws-resource-wafregional-ipset"
-description: |-
- Provides a AWS WAF Regional IPSet resource for use with ALB.
----
-
-# aws\_wafregional\_ipset
-
-Provides a WAF Regional IPSet Resource for use with Application Load Balancer.
-
-## Example Usage
-
-```
-resource "aws_wafregional_ipset" "ipset" {
- name = "tfIPSet"
- ip_set_descriptor {
- type = "IPV4"
- value = "192.0.7.0/24"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name or description of the IPSet.
-* `ip_set_descriptor` - (Optional) The IP address type and IP address range (in CIDR notation) from which web requests originate.
-
-IPSetDescriptor(ip_set_descriptor) support following:
-
-* `type` - (Required) The string like IPV4 or IPV6.
-* `value` - (Required) The CIDR notation.
-
-
-## Remarks
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the WAF IPSet.
diff --git a/website/source/docs/providers/azure/index.html.markdown b/website/source/docs/providers/azure/index.html.markdown
deleted file mode 100644
index a4ae0c66d..000000000
--- a/website/source/docs/providers/azure/index.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "azure"
-page_title: "Provider: Azure Service Management"
-sidebar_current: "docs-azure-index"
-description: |-
- The Azure provider is used to interact with the many resources supported by Azure. The provider needs to be configured with a publish settings file and optionally a subscription ID before it can be used.
----
-
-# Azure Service Management Provider
-
-[arm]: /docs/providers/azurerm/index.html
-
-The Azure Service Management provider is used to interact with the many resources supported
-by Azure. The provider needs to be configured with a [publish settings
-file](https://manage.windowsazure.com/publishsettings) and optionally a
-subscription ID before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Azure Provider
-provider "azure" {
- publish_settings = "${file("credentials.publishsettings")}"
-}
-
-# Create a web server
-resource "azure_instance" "web" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `publish_settings` - (Optional) Contents of a valid `publishsettings` file,
- used to authenticate with the Azure API. You can download the settings file
- here: https://manage.windowsazure.com/publishsettings. You must either
- provide publish settings or both a `subscription_id` and `certificate`. It
- can also be sourced from the `AZURE_PUBLISH_SETTINGS` environment variable.
-
-* `subscription_id` - (Optional) The subscription ID to use. If a
- `settings_file` is not provided `subscription_id` is required. It can also
- be sourced from the `AZURE_SUBSCRIPTION_ID` environment variable.
-
-* `certificate` - (Optional) The certificate used to authenticate with the
- Azure API. If a `settings_file` is not provided `certificate` is required.
- It can also be sourced from the `AZURE_CERTIFICATE` environment variable.
-
-These arguments are supported for backwards compatibility, and may be removed
-in a future version:
-
-* `settings_file` - __Deprecated: please use `publish_settings` instead.__
- Path to or contents of a valid `publishsettings` file, used to
- authenticate with the Azure API. You can download the settings file here:
- https://manage.windowsazure.com/publishsettings. You must either provide
- (or source from the `AZURE_SETTINGS_FILE` environment variable) a settings
- file or both a `subscription_id` and `certificate`.
-
-## Testing:
-
-The following environment variables must be set for the running of the
-acceptance test suite:
-
-* A valid combination of the above which are required for authentification.
-
-* `AZURE_STORAGE` - The name of a storage account to be used in tests which
- require a storage backend. The storage account needs to be located in
- the Western US Azure region.
diff --git a/website/source/docs/providers/azure/r/affinity_group.html.markdown b/website/source/docs/providers/azure/r/affinity_group.html.markdown
deleted file mode 100644
index f09a60a53..000000000
--- a/website/source/docs/providers/azure/r/affinity_group.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_affinity_group"
-sidebar_current: "docs-azure-affinity-group"
-description: |-
- Creates a new affinity group on Azure.
----
-
-# azure\_affinity\_group
-
-Creates a new affinity group on Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_affinity_group" "terraform-main-group" {
- name = "terraform-group"
- location = "North Europe"
- label = "tf-group-01"
- description = "Affinity group created by Terraform."
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the affinity group. Must be unique on your
- Azure subscription.
-
-* `location` - (Required) The location where the affinity group should be created.
- For a list of all Azure locations, please consult [this link](https://azure.microsoft.com/en-us/regions/).
-
-* `label` - (Required) A label to be used for tracking purposes.
-
-* `description` - (Optional) A description for the affinity group.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The affinity group ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/data_disk.html.markdown b/website/source/docs/providers/azure/r/data_disk.html.markdown
deleted file mode 100644
index 484fd6ece..000000000
--- a/website/source/docs/providers/azure/r/data_disk.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_data_disk"
-sidebar_current: "docs-azure-resource-data-disk"
-description: |-
- Adds a data disk to a virtual machine. If the name of an existing disk is given, it will attach that disk. Otherwise it will create and attach a new empty disk.
----
-
-# azure\_data\_disk
-
-Adds a data disk to a virtual machine. If the name of an existing disk is given,
-it will attach that disk. Otherwise it will create and attach a new empty disk.
-
-## Example Usage
-
-```hcl
-resource "azure_data_disk" "data" {
- lun = 0
- size = 10
- storage_service_name = "yourstorage"
- virtual_machine = "server1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of an existing registered disk to attach to the
- virtual machine. If left empty, a new empty disk will be created and
- attached instead. Changing this forces a new resource to be created.
-
-* `label` - (Optional) The identifier of the data disk. Changing this forces a
- new resource to be created (defaults to "virtual_machine-lun")
-
-* `lun` - (Required) The Logical Unit Number (LUN) for the disk. The LUN
- specifies the slot in which the data drive appears when mounted for usage
- by the virtual machine. Valid LUN values are 0 through 31.
-
-* `size` - (Optional) The size, in GB, of an empty disk to be attached to the
- virtual machine. Required when creating a new disk, not used otherwise.
-
-* `caching` - (Optional) The caching behavior of data disk. Valid options are:
- `None`, `ReadOnly` and `ReadWrite` (defaults `None`)
-
-* `storage_service_name` - (Optional) The name of an existing storage account
- within the subscription which will be used to store the VHD of this disk.
- Required if no value is supplied for `media_link`. Changing this forces
- a new resource to be created.
-
-* `media_link` - (Optional) The location of the blob in storage where the VHD
- of this disk will be created. The storage account where must be associated
- with the subscription. Changing this forces a new resource to be created.
-
-* `source_media_link` - (Optional) The location of a blob in storage where a
- VHD file is located that is imported and registered as a disk. If a value
- is supplied, `media_link` will not be used.
-
-* `virtual_machine` - (Required) The name of the virtual machine the disk will
- be attached to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The security group ID.
-* `name` - The name of the disk.
-* `label` - The identifier for the disk.
-* `media_link` - The location of the blob in storage where the VHD of this disk
- is created.
diff --git a/website/source/docs/providers/azure/r/dns_server.html.markdown b/website/source/docs/providers/azure/r/dns_server.html.markdown
deleted file mode 100644
index 311c2f59e..000000000
--- a/website/source/docs/providers/azure/r/dns_server.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_dns_server"
-sidebar_current: "docs-azure-resource-dns-server"
-description: |-
- Creates a new DNS server definition to be used internally in Azure.
----
-
-# azure\_dns\_server
-
-Creates a new DNS server definition to be used internally in Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_dns_server" "google-dns" {
- name = "google"
- dns_address = "8.8.8.8"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS server reference. Changing this
- forces a new resource to be created.
-
-* `dns_address` - (Required) The IP address of the DNS server.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS server definition ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/hosted_service.html.markdown b/website/source/docs/providers/azure/r/hosted_service.html.markdown
deleted file mode 100644
index f16783dcc..000000000
--- a/website/source/docs/providers/azure/r/hosted_service.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_hosted_service"
-sidebar_current: "docs-azure-hosted-service"
-description: |-
- Creates a new hosted service on Azure with its own .cloudapp.net domain.
----
-
-# azure\_hosted\_service
-
-Creates a new hosted service on Azure with its own .cloudapp.net domain.
-
-## Example Usage
-
-```hcl
-resource "azure_hosted_service" "terraform-service" {
- name = "terraform-service"
- location = "North Europe"
- ephemeral_contents = false
- description = "Hosted service created by Terraform."
- label = "tf-hs-01"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the hosted service. Must be unique on Azure.
-
-* `location` - (Required) The location where the hosted service should be created.
- For a list of all Azure locations, please consult [this link](https://azure.microsoft.com/en-us/regions/).
-
-* `ephemeral_contents` - (Required) A boolean value (true|false), specifying
- whether all the resources present in the hosted service should be
- destroyed following the hosted service's destruction.
-
-* `reverse_dns_fqdn` - (Optional) The reverse of the fully qualified domain name
- for the hosted service.
-
-* `label` - (Optional) A label to be used for tracking purposes. Must be
- non-void. Defaults to `Made by Terraform.`.
-
-* `description` - (Optional) A description for the hosted service.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The hosted service ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/instance.html.markdown b/website/source/docs/providers/azure/r/instance.html.markdown
deleted file mode 100644
index 75e8c20e9..000000000
--- a/website/source/docs/providers/azure/r/instance.html.markdown
+++ /dev/null
@@ -1,152 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_instance"
-sidebar_current: "docs-azure-resource-instance"
-description: |-
- Creates a hosted service, role and deployment and then creates a virtual machine in the deployment based on the specified configuration.
----
-
-# azure\_instance
-
-Creates a hosted service, role and deployment and then creates a virtual
-machine in the deployment based on the specified configuration.
-
-## Example Usage
-
-```hcl
-resource "azure_hosted_service" "terraform-service" {
- name = "terraform-service"
- location = "North Europe"
- ephemeral_contents = false
- description = "Hosted service created by Terraform."
- label = "tf-hs-01"
-}
-
-resource "azure_instance" "web" {
- name = "terraform-test"
- hosted_service_name = "${azure_hosted_service.terraform-service.name}"
- image = "Ubuntu Server 14.04 LTS"
- size = "Basic_A1"
- storage_service_name = "yourstorage"
- location = "West US"
- username = "terraform"
- password = "Pass!admin123"
- domain_name = "contoso.com"
- domain_ou = "OU=Servers,DC=contoso.com,DC=Contoso,DC=com"
- domain_username = "Administrator"
- domain_password = "Pa$$word123"
-
- endpoint {
- name = "SSH"
- protocol = "tcp"
- public_port = 22
- private_port = 22
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance. Changing this forces a new
- resource to be created.
-
-* `hosted_service_name` - (Optional) The name of the hosted service the
- instance should be deployed under. If not provided; it will default to the
- value of `name`. Changes to this parameter forces the creation of a new
- resource.
-
-* `description` - (Optional) The description for the associated hosted service.
- Changing this forces a new resource to be created (defaults to the instance
- name).
-
-* `image` - (Required) The name of an existing VM or OS image to use for this
- instance. Changing this forces a new resource to be created.
-
-* `size` - (Required) The size of the instance.
-
-* `subnet` - (Optional) The name of the subnet to connect this instance to. If
- a value is supplied `virtual_network` is required. Changing this forces a
- new resource to be created.
-
-* `virtual_network` - (Optional) The name of the virtual network the `subnet`
- belongs to. If a value is supplied `subnet` is required. Changing this
- forces a new resource to be created.
-
-* `storage_service_name` - (Optional) The name of an existing storage account
- within the subscription which will be used to store the VHDs of this
- instance. Changing this forces a new resource to be created. **A Storage
- Service is required if you are using a Platform Image**
-
-* `reverse_dns` - (Optional) The DNS address to which the IP address of the
- hosted service resolves when queried using a reverse DNS query. Changing
- this forces a new resource to be created.
-
-* `location` - (Required) The location/region where the cloud service is
- created. Changing this forces a new resource to be created.
-
-* `automatic_updates` - (Optional) If true this will enable automatic updates.
- This attribute is only used when creating a Windows instance. Changing this
- forces a new resource to be created (defaults false)
-
-* `time_zone` - (Optional) The appropriate time zone for this instance in the
- format 'America/Los_Angeles'. This attribute is only used when creating a
- Windows instance. Changing this forces a new resource to be created
- (defaults false)
-
-* `username` - (Required) The username of a new user that will be created while
- creating the instance. Changing this forces a new resource to be created.
-
-* `password` - (Optional) The password of the new user that will be created
- while creating the instance. Required when creating a Windows instance or
- when not supplying an `ssh_key_thumbprint` while creating a Linux instance.
- Changing this forces a new resource to be created.
-
-* `ssh_key_thumbprint` - (Optional) The SSH thumbprint of an existing SSH key
- within the subscription. This attribute is only used when creating a Linux
- instance. Changing this forces a new resource to be created.
-
-* `security_group` - (Optional) The Network Security Group to associate with
- this instance.
-
-* `endpoint` - (Optional) Can be specified multiple times to define multiple
- endpoints. Each `endpoint` block supports fields documented below.
-
-* `domain_name` - (Optional) The name of an Active Directory domain to join.
-
-* `domain_ou` - (Optional) Specifies the LDAP Organizational Unit to place the
- instance in.
-
-* `domain_username` - (Optional) The username of an account with permission to
- join the instance to the domain. Required if a domain_name is specified.
-
-* `domain_password` - (Optional) The password for the domain_username account
- specified above.
-
-* `custom_data` - (Optional) The custom data to provide when launching the
- instance.
-
-The `endpoint` block supports:
-
-* `name` - (Required) The name of the external endpoint.
-
-* `protocol` - (Optional) The transport protocol for the endpoint. Valid
- options are: `tcp` and `udp` (defaults `tcp`)
-
-* `public_port` - (Required) The external port to use for the endpoint.
-
-* `private_port` - (Required) The private port on which the instance is
- listening.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The instance ID.
-* `description` - The description for the associated hosted service.
-* `subnet` - The subnet the instance is connected to.
-* `endpoint` - The complete set of configured endpoints.
-* `security_group` - The associated Network Security Group.
-* `ip_address` - The private IP address assigned to the instance.
-* `vip_address` - The public IP address assigned to the instance.
diff --git a/website/source/docs/providers/azure/r/local_network_connection.html.markdown b/website/source/docs/providers/azure/r/local_network_connection.html.markdown
deleted file mode 100644
index d9b3d3ef6..000000000
--- a/website/source/docs/providers/azure/r/local_network_connection.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_local_network_connection"
-sidebar_current: "docs-azure-resource-local-network-connection"
-description: |-
- Defines a new connection to a remote network through a VPN tunnel.
----
-
-# azure\_local\_network\_connection
-
-Defines a new connection to a remote network through a VPN tunnel.
-
-## Example Usage
-
-```hcl
-resource "azure_local_network_connection" "localnet" {
- name = "terraform-local-network-connection"
- vpn_gateway_address = "45.12.189.2"
- address_space_prefixes = ["10.10.10.0/24", "10.10.11.0/24"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name by which this local network connection will
- be referenced by. Changing this forces a new resource to be created.
-
-* `vpn_gateway_address` - (Required) The public IPv4 of the VPN endpoint.
-
-* `address_space_prefixes` - (Required) List of address spaces accessible
- through the VPN connection. The elements are in the CIDR format.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The local network connection ID.
diff --git a/website/source/docs/providers/azure/r/security_group.html.markdown b/website/source/docs/providers/azure/r/security_group.html.markdown
deleted file mode 100644
index 31a4ea278..000000000
--- a/website/source/docs/providers/azure/r/security_group.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_security_group"
-sidebar_current: "docs-azure-resource-security-group"
-description: |-
- Creates a new network security group within the context of the specified subscription.
----
-
-# azure\_security\_group
-
-Creates a new network security group within the context of the specified
-subscription.
-
-## Example Usage
-
-```hcl
-resource "azure_security_group" "web" {
- name = "webservers"
- location = "West US"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the security group. Changing this forces a
- new resource to be created.
-
-* `label` - (Optional) The identifier for the security group. The label can be
- up to 1024 characters long. Changing this forces a new resource to be
- created (defaults to the security group name)
-
-* `location` - (Required) The location/region where the security group is
- created. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The security group ID.
-* `label` - The identifier for the security group.
diff --git a/website/source/docs/providers/azure/r/security_group_rule.html.markdown b/website/source/docs/providers/azure/r/security_group_rule.html.markdown
deleted file mode 100644
index 3b01cb092..000000000
--- a/website/source/docs/providers/azure/r/security_group_rule.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_security_group_rule"
-sidebar_current: "docs-azure-resource-security-group-rule"
-description: |-
- Creates a new network security rule to be associated with a given security group.
----
-
-# azure\_security\_group\_rule
-
-Creates a new network Security Group Rule to be associated with a number of
-given Security Groups.
-
-~> **NOTE on Security Group Rules**: for usability purposes; Terraform allows the
-addition of a single Security Group Rule to multiple Security Groups, despite
-it having to define each rule individually per Security Group on Azure. As a
-result; in the event that one of the Rules on one of the Groups is modified by
-external factors, Terraform cannot reason as to whether or not that change
-should be propagated to the others; let alone choose one changed Rule
-configuration over another in case of a conflict. As such; `terraform refresh`
-only checks that the rule is still defined for each of the specified
-`security_group_names`; ignoring the actual parameters of the Rule and **not**
-updating the state with regards to them.
-
-## Example Usage
-
-```hcl
-resource "azure_security_group" "web" {
- # ...
-}
-
-resource "azure_security_group" "apps" {
- # ...
-}
-
-resource "azure_security_group_rule" "ssh_access" {
- name = "ssh-access-rule"
- security_group_names = ["${azure_security_group.web.name}", "${azure_security_group.apps.name}"]
- type = "Inbound"
- action = "Allow"
- priority = 200
- source_address_prefix = "100.0.0.0/32"
- source_port_range = "*"
- destination_address_prefix = "10.0.0.0/32"
- destination_port_range = "22"
- protocol = "TCP"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-* `name` - (Required) The name of the security group rule.
-
-* `security_group_names` - (Required) A list of the names of the security groups
- the rule should be applied to.
- Changing this list forces the creation of a new resource.
-
-* `type` - (Required) The type of the security rule. Valid options are:
- `Inbound` and `Outbound`.
-
-* `priority` - (Required) The priority of the network security rule. Rules with
- lower priority are evaluated first. This value can be between 100 and 4096.
-
-* `action` - (Optional) The action that is performed when the security rule is
- matched. Valid options are: `Allow` and `Deny`.
-
-* `source_address_prefix` - (Required) The address prefix of packet sources that
- should be subjected to the rule. An asterisk (\*) can also be used to
- match all source IPs.
-
-* `source_port_range` - (Required) The source port or range. This value can be
- between 0 and 65535. An asterisk (\*) can also be used to match all ports.
-
-* `destination_address_prefix` - (Required) The address prefix of packet
- destinations that should be subjected to the rule. An asterisk
- (\*) can also be used to match all destination IPs.
-
-* `destination_port_range` - (Required) The destination port or range. This value
- can be between 0 and 65535. An asterisk (\*) can also be used to match all
- ports.
-
-* `protocol` - (Optional) The protocol of the security rule. Valid options are:
- `TCP`, `UDP` and `*`.
-
-The following attributes are exported:
-
-* `id` - The security group rule ID. Coincides with its given `name`.
diff --git a/website/source/docs/providers/azure/r/sql_database_server.html.markdown b/website/source/docs/providers/azure/r/sql_database_server.html.markdown
deleted file mode 100644
index baeb32ba4..000000000
--- a/website/source/docs/providers/azure/r/sql_database_server.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_sql_database_server"
-sidebar_current: "docs-azure-sql-database-server"
-description: |-
- Allocates a new SQL Database Server on Azure.
----
-
-# azure\_sql\_database\_server
-
-Allocates a new SQL Database Server on Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_sql_database_server" "sql-serv" {
- name = ""
- location = "West US"
- username = "SuperUser"
- password = "SuperSEKR3T"
- version = "2.0"
- url = ""
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Computed) The name of the database server. It is determined upon
- creation as it is randomly-generated per server.
-
-* `location` - (Required) The location where the database server should be created.
- For a list of all Azure locations, please consult [this link](https://azure.microsoft.com/en-us/regions/).
-
-* `username` - (Required) The username for the administrator of the database server.
-
-* `password` - (Required) The password for the administrator of the database server.
-
-* `version` - (Optional) The version of the database server to be used. Can be any
- one of `2.0` or `12.0`.
-
-* `url` - (Computed) The fully qualified domain name of the database server.
- Will be of the form `<name>.database.windows.net`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The database server ID. Coincides with the randomly-generated `name`.
diff --git a/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown b/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown
deleted file mode 100644
index b5a02b3b2..000000000
--- a/website/source/docs/providers/azure/r/sql_database_server_firewall_rule.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_sql_database_server_firewall_rule"
-sidebar_current: "docs-azure-sql-database-server-firewall-rule"
-description: |-
- Defines a new Firewall Rule to be applied across the given Database Servers.
----
-
-# azure\_sql\_database\_server\_firewall\_rule
-
-Defines a new Firewall Rule to be applied across the given Database Servers.
-
-## Example Usage
-
-```hcl
-resource "azure_sql_database_server" "sql-serv1" {
- # ...
-}
-
-resource "azure_sql_database_server" "sql-serv2" {
- # ...
-}
-
-resource "azure_sql_database_server_firewall_rule" "constraint" {
- name = "terraform-testing-rule"
- start_ip = "154.0.0.0"
- end_ip = "154.0.0.255"
-
- database_server_names = [
- "${azure_sql_database_server.sql-serv1.name}",
- "${azure_sql_database_server.sql-serv2.name}",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the rule. Changing forces the creation of a
- new resource.
-
-* `start_ip` - (Required) The IPv4 which will represent the lower bound of the
- rule's application IP's. Traffic to/from IP's greater than or equal to this
- one up to the `end_ip` will be permitted.
-
-* `end_ip` - (Required) The IPv4 which will represent the upper bound of the
- rule's application IP's. Traffic to/from IP's lesser than or equal to this
- one all the way down to the `start_ip` will be permitted.
-
-* `database_server_names` - (Required) The set of names of the Azure SQL
- Database servers the rule should be enforced on.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The database server ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/sql_database_service.html.markdown b/website/source/docs/providers/azure/r/sql_database_service.html.markdown
deleted file mode 100644
index f8cdf9687..000000000
--- a/website/source/docs/providers/azure/r/sql_database_service.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_sql_database_service"
-sidebar_current: "docs-azure-sql-database-service"
-description: |-
- Creates a new SQL Database Service on an Azure Database Server.
----
-
-# azure\_sql\_database\_service
-
-Creates a new SQL database service on an Azure database server.
-
-## Example Usage
-
-```hcl
-resource "azure_sql_database_service" "sql-server" {
- name = "terraform-testing-db-renamed"
- database_server_name = "flibberflabber"
- edition = "Standard"
- collation = "SQL_Latin1_General_CP1_CI_AS"
- max_size_bytes = "5368709120"
- service_level_id = "f1173c43-91bd-4aaa-973c-54e79e15235b"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the database service.
-
-* `database_server_name` - (Required) The name of the database server this service
- should run on. Changes here force the creation of a new resource.
-
-* `edition` - (Optional) The edition of the database service. For more information
- on each variant, please view [this](https://msdn.microsoft.com/library/azure/dn741340.aspx) link.
-
-* `collation` - (Optional) The collation to be used within the database service.
- Defaults to the standard Latin charset.
-
-* `max_size_bytes` - (Optional) The maximum size in bytes the database service
- should be allowed to expand to. Range depends on the database `edition`
- selected above.
-
-* `service_level_id` - (Optional) The ID corresponding to the service level per
- edition. Please refer to [this](https://msdn.microsoft.com/en-us/library/azure/dn505701.aspx) link for more details.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The database service ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/storage_blob.html.markdown b/website/source/docs/providers/azure/r/storage_blob.html.markdown
deleted file mode 100644
index 6f1d56aaf..000000000
--- a/website/source/docs/providers/azure/r/storage_blob.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_storage_blob"
-sidebar_current: "docs-azure-storage-blob"
-description: |-
- Creates a new storage blob within a given storage container on Azure.
----
-
-# azure\_storage\_blob
-
-Creates a new storage blob within a given storage container on Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_storage_blob" "foo" {
- name = "tftesting-blob"
- storage_service_name = "tfstorserv"
- storage_container_name = "terraform-storage-container"
- type = "PageBlob"
- size = 1024
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage blob. Must be unique within
- the storage service the blob is located.
-
-* `storage_service_name` - (Required) The name of the storage service within
- which the storage container in which the blob will be created resides.
-
-* `storage_container_name` - (Required) The name of the storage container
- in which this blob should be created. Must be located on the storage
- service given with `storage_service_name`.
-
-* `type` - (Required) The type of the storage blob to be created. One of either
- `BlockBlob` or `PageBlob`.
-
-* `size` - (Optional) Used only for `PageBlob`'s to specify the size in bytes
- of the blob to be created. Must be a multiple of 512. Defaults to 0.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The storage blob ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/storage_container.html.markdown b/website/source/docs/providers/azure/r/storage_container.html.markdown
deleted file mode 100644
index 3b09fd322..000000000
--- a/website/source/docs/providers/azure/r/storage_container.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_storage_container"
-sidebar_current: "docs-azure-storage-container"
-description: |-
- Creates a new storage container within a given storage service on Azure.
----
-
-# azure\_storage\_container
-
-Creates a new storage container within a given storage service on Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_storage_container" "stor-cont" {
- name = "terraform-storage-container"
- container_access_type = "blob"
- storage_service_name = "tfstorserv"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage container. Must be unique within
- the storage service the container is located.
-
-* `storage_service_name` - (Required) The name of the storage service within
- which the storage container should be created.
-
-* `container_access_type` - (Required) The 'interface' for access that the container
- provides. Can be either `blob`, `container` or ``.
-
-* `properties` - (Optional) Key-value definition of additional properties
- associated to the storage service.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The storage container ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/storage_queue.html.markdown b/website/source/docs/providers/azure/r/storage_queue.html.markdown
deleted file mode 100644
index 117d766a9..000000000
--- a/website/source/docs/providers/azure/r/storage_queue.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_storage_queue"
-sidebar_current: "docs-azure-storage-queue"
-description: |-
- Creates a new storage queue within a given storage service on Azure.
----
-
-# azure\_storage\_queue
-
-Creates a new storage queue within a given storage service on Azure.
-
-## Example Usage
-
-```hcl
-resource "azure_storage_queue" "stor-queue" {
- name = "terraform-storage-queue"
- storage_service_name = "tfstorserv"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage queue. Must be unique within
- the storage service the queue is located.
-
-* `storage_service_name` - (Required) The name of the storage service within
- which the storage queue should be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The storage queue ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/storage_service.html.markdown b/website/source/docs/providers/azure/r/storage_service.html.markdown
deleted file mode 100644
index 619f45755..000000000
--- a/website/source/docs/providers/azure/r/storage_service.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_storage_service"
-sidebar_current: "docs-azure-storage-service"
-description: |-
- Creates a new storage service on Azure in which storage containers may be created.
----
-
-# azure\_storage\_service
-
-Creates a new storage service on Azure in which storage containers may be created.
-
-## Example Usage
-
-```hcl
-resource "azure_storage_service" "tfstor" {
- name = "tfstor"
- location = "West US"
- description = "Made by Terraform."
- account_type = "Standard_LRS"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage service. Must be between 4 and 24
- lowercase-only characters or digits. Must be unique on Azure.
-
-* `location` - (Required) The location where the storage service should be created.
- For a list of all Azure locations, please consult [this link](https://azure.microsoft.com/en-us/regions/).
-
-* `account_type` - (Required) The type of storage account to be created.
- Available options include `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`,
- `Standard_RAGRS` and `Premium_LRS`. To learn more about the differences
- of each storage account type, please consult [this link](http://blogs.msdn.com/b/windowsazurestorage/archive/2013/12/11/introducing-read-access-geo-replicated-storage-ra-grs-for-windows-azure-storage.aspx).
-
-* `affinity_group` - (Optional) The affinity group the storage service should
- belong to.
-
-* `properties` - (Optional) Key-value definition of additional properties
- associated to the storage service. For additional information on what
- these properties do, please consult [this link](https://msdn.microsoft.com/en-us/library/azure/hh452235.aspx).
-
-* `label` - (Optional) A label to be used for tracking purposes. Must be
- non-void. Defaults to `Made by Terraform.`.
-
-* `description` - (Optional) A description for the storage service.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The storage service ID. Coincides with the given `name`.
diff --git a/website/source/docs/providers/azure/r/virtual_network.html.markdown b/website/source/docs/providers/azure/r/virtual_network.html.markdown
deleted file mode 100644
index 092e013de..000000000
--- a/website/source/docs/providers/azure/r/virtual_network.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "azure"
-page_title: "Azure: azure_virtual_network"
-sidebar_current: "docs-azure-resource-virtual-network"
-description: |-
- Creates a new virtual network including any configured subnets. Each subnet can optionally be configured with a security group to be associated with the subnet.
----
-
-# azure\_virtual\_network
-
-Creates a new virtual network including any configured subnets. Each subnet can
-optionally be configured with a security group to be associated with the subnet.
-
-## Example Usage
-
-```hcl
-resource "azure_virtual_network" "default" {
- name = "test-network"
- address_space = ["10.1.2.0/24"]
- location = "West US"
-
- subnet {
- name = "subnet1"
- address_prefix = "10.1.2.0/25"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the virtual network. Changing this forces a
- new resource to be created.
-
-* `address_space` - (Required) The address space that is used by the virtual
- network. You can supply more than one address space. Changing this forces
- a new resource to be created.
-
-* `location` - (Required) The location/region where the virtual network is
- created. Changing this forces a new resource to be created.
-
-* `dns_servers` - (Optional) List of names of DNS servers previously registered
- on Azure.
-
-* `subnet` - (Required) Can be specified multiple times to define multiple
- subnets. Each `subnet` block supports fields documented below.
-
-The `subnet` block supports:
-
-* `name` - (Required) The name of the subnet.
-
-* `address_prefix` - (Required) The address prefix to use for the subnet.
-
-* `security_group` - (Optional) The Network Security Group to associate with
- the subnet.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual NetworkConfiguration ID.
diff --git a/website/source/docs/providers/azurerm/d/client_config.html.markdown b/website/source/docs/providers/azurerm/d/client_config.html.markdown
deleted file mode 100644
index 3bee99162..000000000
--- a/website/source/docs/providers/azurerm/d/client_config.html.markdown
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_client_config"
-sidebar_current: "docs-azurerm-datasource-client-config"
-description: |-
- Get information about the configuration of the azurerm provider.
----
-
-# azurerm\_client\_config
-
-Use this data source to access the configuration of the Azure Resource Manager
-provider.
-
-## Example Usage
-
-```hcl
-data "azurerm_client_config" "current" {}
-
-output "account_id" {
- value = "${data.azurerm_client_config.current.account_id}"
-}
-```
-
-## Argument Reference
-
-There are no arguments available for this data source.
-
-## Attributes Reference
-
-* `client_id` is set to the Azure Client ID.
-* `tenant_id` is set to the Azure Tenant ID.
-* `subscription_id` is set to the Azure Subscription ID.
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/d/public_ip.html.markdown b/website/source/docs/providers/azurerm/d/public_ip.html.markdown
deleted file mode 100644
index 168c16cc2..000000000
--- a/website/source/docs/providers/azurerm/d/public_ip.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_public_ip"
-sidebar_current: "docs-azurerm-datasource-public-ip"
-description: |-
- Get information about the specified public IP address.
----
-
-# azurerm\_public\_ip
-
-Use this data source to access the properties of an existing Azure Public IP Address.
-
-## Example Usage
-
-```hcl
-data "azurerm_public_ip" "datasourceip" {
- name = "testPublicIp"
- resource_group_name = "acctestRG"
-}
-
-resource "azurerm_virtual_network" "helloterraformnetwork" {
- name = "acctvn"
- address_space = ["10.0.0.0/16"]
- location = "West US 2"
- resource_group_name = "acctestRG"
-}
-
-resource "azurerm_subnet" "helloterraformsubnet" {
- name = "acctsub"
- resource_group_name = "acctestRG"
- virtual_network_name = "${azurerm_virtual_network.helloterraformnetwork.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_network_interface" "helloterraformnic" {
- name = "tfni"
- location = "West US 2"
- resource_group_name = "acctestRG"
-
- ip_configuration {
- name = "testconfiguration1"
- subnet_id = "${azurerm_subnet.helloterraformsubnet.id}"
- private_ip_address_allocation = "static"
- private_ip_address = "10.0.2.5"
- public_ip_address_id = "${data.azurerm_public_ip.datasourceip.id}"
- }
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) Specifies the name of the public IP address.
-* `resource_group_name` - (Required) Specifies the name of the resource group.
-
-
-## Attributes Reference
-
-* `domain_name_label` - The label for the Domain Name.
-* `idle_timeout_in_minutes` - Specifies the timeout for the TCP idle connection.
-* `fqdn` - Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone.
-* `ip_address` - The IP address value that was allocated.
-* `tags` - A mapping of tags assigned to the resource.
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/d/resource_group.html.markdown b/website/source/docs/providers/azurerm/d/resource_group.html.markdown
deleted file mode 100644
index 6f64ea88e..000000000
--- a/website/source/docs/providers/azurerm/d/resource_group.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_resource_group"
-sidebar_current: "docs-azurerm-datasource-resource-group"
-description: |-
- Get information about the specified resource group.
----
-
-# azurerm\_resource\_group
-
-Use this data source to access the properties of an Azure resource group.
-
-## Example Usage
-
-```hcl
-data "azurerm_resource_group" "test" {
- name = "dsrg_test"
-}
-
-resource "azurerm_managed_disk" "test" {
- name = "managed_disk_name"
- location = "${data.azurerm_resource_group.test.location}"
- resource_group_name = "${data.azurerm_resource_group.test.name}"
- storage_account_type = "Standard_LRS"
- create_option = "Empty"
- disk_size_gb = "1"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) Specifies the name of the resource group.
-
-~> **NOTE:** If the specified location doesn't match the actual resource group location, an error message with the actual location value will be shown.
-
-## Attributes Reference
-
-* `location` - The location of the resource group.
-* `tags` - A mapping of tags assigned to the resource group.
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/index.html.markdown b/website/source/docs/providers/azurerm/index.html.markdown
deleted file mode 100644
index e43f0563c..000000000
--- a/website/source/docs/providers/azurerm/index.html.markdown
+++ /dev/null
@@ -1,226 +0,0 @@
----
-layout: "azurerm"
-page_title: "Provider: Azure Resource Manager"
-sidebar_current: "docs-azurerm-index"
-description: |-
- The Azure Resource Manager provider is used to interact with the many resources supported by Azure, via the ARM API. This supersedes the Azure provider, which interacts with Azure using the Service Management API. The provider needs to be configured with a credentials file, or credentials needed to generate OAuth tokens for the ARM API.
----
-
-# Microsoft Azure Provider
-
-The Microsoft Azure provider is used to interact with the many
-resources supported by Azure, via the ARM API. This supersedes the [legacy Azure
-provider][asm], which interacts with Azure using the Service Management API. The
-provider needs to be configured with the credentials needed to generate OAuth
-tokens for the ARM API.
-
-[asm]: /docs/providers/azure/index.html
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Microsoft Azure Provider
-provider "azurerm" {
- subscription_id = "..."
- client_id = "..."
- client_secret = "..."
- tenant_id = "..."
-}
-
-# Create a resource group
-resource "azurerm_resource_group" "production" {
- name = "production"
- location = "West US"
-}
-
-# Create a virtual network in the web_servers resource group
-resource "azurerm_virtual_network" "network" {
- name = "productionNetwork"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- resource_group_name = "${azurerm_resource_group.production.name}"
-
- subnet {
- name = "subnet1"
- address_prefix = "10.0.1.0/24"
- }
-
- subnet {
- name = "subnet2"
- address_prefix = "10.0.2.0/24"
- }
-
- subnet {
- name = "subnet3"
- address_prefix = "10.0.3.0/24"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `subscription_id` - (Optional) The subscription ID to use. It can also
- be sourced from the `ARM_SUBSCRIPTION_ID` environment variable.
-
-* `client_id` - (Optional) The client ID to use. It can also be sourced from
- the `ARM_CLIENT_ID` environment variable.
-
-* `client_secret` - (Optional) The client secret to use. It can also be sourced from
- the `ARM_CLIENT_SECRET` environment variable.
-
-* `tenant_id` - (Optional) The tenant ID to use. It can also be sourced from the
- `ARM_TENANT_ID` environment variable.
-
-* `environment` - (Optional) The cloud environment to use. It can also be sourced
- from the `ARM_ENVIRONMENT` environment variable. Supported values are:
- * `public` (default)
- * `usgovernment`
- * `german`
- * `china`
-
-* `skip_provider_registration` - (Optional) Prevents the provider from registering
- the ARM provider namespaces, this can be used if you don't wish to give the Active
- Directory Application permission to register resource providers. It can also be
- sourced from the `ARM_SKIP_PROVIDER_REGISTRATION` environment variable, defaults
- to `false`.
-
-## Creating Credentials
-
-Azure requires that an application is added to Azure Active Directory to generate the `client_id`, `client_secret`, and `tenant_id` needed by Terraform (`subscription_id` can be recovered from your Azure account details).
-
-It's possible to complete this task in either the [Azure CLI](#creating-credentials-using-the-azure-cli) or in the [Azure Portal](#creating-credentials-in-the-azure-portal) - in both we'll create a Service Principal which has `Contributor` rights to the subscription. [It's also possible to assign other rights](https://azure.microsoft.com/en-gb/documentation/articles/role-based-access-built-in-roles/) depending on your configuration.
-
-### Creating Credentials using the Azure CLI
-
-~> **Note**: if you're using the **China**, **German** or **Government** Azure Clouds - you'll need to first configure the Azure CLI to work with that Cloud. You can do this by running:
-
-```
-$ az cloud set --name AzureChinaCloud|AzureGermanCloud|AzureUSGovernment
-```
-
----
-
-Firstly, login to the Azure CLI using:
-
-```shell
-$ az login
-```
-
-
-Once logged in - it's possible to list the Subscriptions associated with the account via:
-
-```shell
-$ az account list
-```
-
-The output (similar to below) will display one or more Subscriptions - with the `ID` field being the `subscription_id` field referenced above.
-
-```json
-[
- {
- "cloudName": "AzureCloud",
- "id": "00000000-0000-0000-0000-000000000000",
- "isDefault": true,
- "name": "PAYG Subscription",
- "state": "Enabled",
- "tenantId": "00000000-0000-0000-0000-000000000000",
- "user": {
- "name": "user@example.com",
- "type": "user"
- }
- }
-]
-```
-
-Should you have more than one Subscription, you can specify the Subscription to use via the following command:
-
-```shell
-$ az account set --subscription="SUBSCRIPTION_ID"
-```
-
-We can now create the Service Principal, which will have permissions to manage resources in the specified Subscription using the following command:
-
-```shell
-$ az ad sp create-for-rbac --role="Contributor" --scopes="/subscriptions/SUBSCRIPTION_ID"
-```
-
-This command will output 5 values:
-
-```json
-{
- "appId": "00000000-0000-0000-0000-000000000000",
- "displayName": "azure-cli-2017-06-05-10-41-15",
- "name": "http://azure-cli-2017-06-05-10-41-15",
- "password": "0000-0000-0000-0000-000000000000",
- "tenant": "00000000-0000-0000-0000-000000000000"
-}
-```
-
-These values map to the Terraform variables like so:
-
- - `appId` is the `client_id` defined above.
- - `password` is the `client_secret` defined above.
- - `tenant` is the `tenant_id` defined above.
-
----
-
-Finally - it's possible to test these values work as expected by first logging in:
-
-```shell
-$ az login --service-principal -u CLIENT_ID -p CLIENT_SECRET --tenant TENANT_ID
-```
-
-Once logged in as the Service Principal - we should be able to list the VM Sizes by specifying an Azure region, for example here we use the `West US` region:
-
-```shell
-$ az vm list-sizes --location westus
-```
-
-~> **Note**: If you're using the **China**, **German** or **Government** Azure Clouds - you will need to switch `westus` out for another region. You can find which regions are available by running:
-
-```
-$ az account list-locations
-```
-
-### Creating Credentials in the Azure Portal
-
-There's a couple of phases to create Credentials via [the Azure Portal](https://portal.azure.com):
-
- 1. Creating an Application in Azure Active Directory (which acts as a Service Principal)
- 2. Granting the Application access to manage resources in your Azure Subscription
-
-### 1. Creating an Application in Azure Active Directory
-
-Firstly navigate to [the **Azure Active Directory** overview](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/Overview) within the Azure Portal - [then select the **App Registration** blade](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps/RegisteredApps/Overview) and finally click **Endpoints** at the top of the **App Registration** blade. This will display a list of URIs, the URI for **OAUTH 2.0 AUTHORIZATION ENDPOINT** contains a GUID - which is your Tenant ID / the `tenant_id` field mentioned above.
-
-Next, navigate back to [the **App Registration** blade](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps/RegisteredApps/Overview) - from here we'll create the Application in Azure Active Directory. To do this click **Add** at the top to add a new Application within Azure Active Directory. On this page, set the following values then press **Create**:
-
-- **Name** - this is a friendly identifier and can be anything (e.g. "Terraform")
-- **Application Type** - this should be set to "Web app / API"
-- **Sign-on URL** - this can be anything, providing it's a valid URI (e.g. https://terra.form)
-
-Once that's done - select the Application you just created in [the **App Registration** blade](https://portal.azure.com/#blade/Microsoft_AAD_IAM/ActiveDirectoryMenuBlade/RegisteredApps/RegisteredApps/Overview). At the top of this page, the "Application ID" GUID is the `client_id` you'll need.
-
-Finally, we can create the `client_secret` by selecting **Keys** and then generating a new key by entering a description, selecting how long the `client_secret` should be valid for - and finally pressing **Save**. This value will only be visible whilst on the page, so be sure to copy it now (otherwise you'll need to regenerate a new key).
-
-### 2. Granting the Application access to manage resources in your Azure Subscription
-
-Once the Application exists in Azure Active Directory - we can grant it permissions to modify resources in the Subscription. To do this, [navigate to the **Subscriptions** blade within the Azure Portal](https://portal.azure.com/#blade/Microsoft_Azure_Billing/SubscriptionsBlade), then select the Subscription you wish to use, then click **Access Control (IAM)**, and finally **Add**.
-
-Firstly specify a Role which grants the appropriate permissions needed for the Service Principal (for example, `Contributor` will grant Read/Write on all resources in the Subscription). There's more information about [the built in roles](https://azure.microsoft.com/en-gb/documentation/articles/role-based-access-built-in-roles/) available here.
-
-Secondly, search for and select the name of the Application created in Azure Active Directory to assign it this role - then press **Save**.
-
-## Creating Credentials through the Legacy CLI's
-
-It's also possible to create credentials via [the legacy cross-platform CLI](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal-cli/) and the [legacy PowerShell Commandlets](https://azure.microsoft.com/en-us/documentation/articles/resource-group-authenticate-service-principal/) - however we would highly recommend using the Azure CLI above.
-
-## Testing
-
-Credentials must be provided via the `ARM_SUBSCRIPTION_ID`, `ARM_CLIENT_ID`,
-`ARM_CLIENT_SECRET` and `ARM_TENANT_ID` environment variables in order to run
-acceptance tests.
diff --git a/website/source/docs/providers/azurerm/r/availability_set.html.markdown b/website/source/docs/providers/azurerm/r/availability_set.html.markdown
deleted file mode 100644
index 8dd5cd412..000000000
--- a/website/source/docs/providers/azurerm/r/availability_set.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_availability_set"
-sidebar_current: "docs-azurerm-resource-virtualmachine-availability-set"
-description: |-
- Create an availability set for virtual machines.
----
-
-# azurerm\_availability\_set
-
-Create an availability set for virtual machines.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_availability_set" "test" {
- name = "acceptanceTestAvailabilitySet1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the availability set. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the availability set.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `platform_update_domain_count` - (Optional) Specifies the number of update domains that are used. Defaults to 5.
-
-* `platform_fault_domain_count` - (Optional) Specifies the number of fault domains that are used. Defaults to 3.
-
-* `managed` - (Optional) Specifies whether the availability set is managed or not. Possible values are `true` (to specify aligned) or `false` (to specify classic). Default is `false`.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual AvailabilitySet ID.
-
-
-## Import
-
-Availability Sets can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_availability_set.group1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/availabilitySets/webAvailSet
-```
diff --git a/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown b/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown
deleted file mode 100644
index 0c64e24ec..000000000
--- a/website/source/docs/providers/azurerm/r/cdn_endpoint.html.markdown
+++ /dev/null
@@ -1,96 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_cdn_endpoint"
-sidebar_current: "docs-azurerm-resource-cdn-endpoint"
-description: |-
- Create a CDN Endpoint entity.
----
-
-# azurerm\_cdn\_endpoint
-
-A CDN Endpoint is the entity within a CDN Profile containing configuration information regarding caching behaviors and origins. The CDN Endpoint is exposed using the URL format .azureedge.net by default, but custom domains can also be created.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_cdn_profile" "test" {
- name = "acceptanceTestCdnProfile1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Standard"
-}
-
-resource "azurerm_cdn_endpoint" "test" {
- name = "acceptanceTestCdnEndpoint1"
- profile_name = "${azurerm_cdn_profile.test.name}"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- origin {
- name = "acceptanceTestCdnOrigin1"
- host_name = "www.example.com"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the CDN Endpoint. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the CDN Endpoint.
-
-* `profile_name` - (Required) The CDN Profile to which to attach the CDN Endpoint.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `origin_host_header` - (Optional) The host header CDN provider will send along with content requests to origins. Defaults to the host name of the origin.
-
-* `is_http_allowed` - (Optional) Defaults to `true`.
-
-* `is_https_allowed` - (Optional) Defaults to `true`.
-
-* `origin` - (Optional) The set of origins of the CDN endpoint. When multiple origins exist, the first origin will be used as primary and rest will be used as failover options.
-Each `origin` block supports fields documented below.
-
-* `origin_path` - (Optional) The path used for origin requests.
-
-* `querystring_caching_behaviour` - (Optional) Sets query string caching behavior. Allowed values are `IgnoreQueryString`, `BypassCaching` and `UseQueryString`. Defaults to `IgnoreQueryString`.
-
-* `content_types_to_compress` - (Optional) An array of strings that indicates a content types on which compression will be applied. The value for the elements should be MIME types.
-
-* `is_compression_enabled` - (Optional) Indicates whether compression is to be enabled. Defaults to false.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `origin` block supports:
-
-* `name` - (Required) The name of the origin. This is an arbitrary value. However, this value needs to be unique under endpoint.
-
-* `host_name` - (Required) A string that determines the hostname/IP address of the origin server. This string could be a domain name, IPv4 address or IPv6 address.
-
-* `http_port` - (Optional) The HTTP port of the origin. Defaults to null. When null, 80 will be used for HTTP.
-
-* `https_port` - (Optional) The HTTPS port of the origin. Defaults to null. When null, 443 will be used for HTTPS.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The CDN Endpoint ID.
-
-## Import
-
-CDN Endpoints can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_cdn_endpoint.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1/endpoints/myendpoint1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown b/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown
deleted file mode 100644
index cc2990589..000000000
--- a/website/source/docs/providers/azurerm/r/cdn_profile.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_cdn_profile"
-sidebar_current: "docs-azurerm-resource-cdn-profile"
-description: |-
- Create a CDN Profile to create a collection of CDN Endpoints.
----
-
-# azurerm\_cdn\_profile
-
-Create a CDN Profile to create a collection of CDN Endpoints.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_cdn_profile" "test" {
- name = "acceptanceTestCdnProfile1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Standard_Verizon"
-
- tags {
- environment = "Production"
- cost_center = "MSFT"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the CDN Profile. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the CDN Profile.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `sku` - (Required) The pricing related information of current CDN profile. Accepted values are `Standard_Verizon`, `Standard_Akamai` or `Premium_Verizon`.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The CDN Profile ID.
-
-## Import
-
-CDN Profiles can be imported using the `resource id`, e.g.
-
-```hcl
-terraform import azurerm_cdn_profile.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Cdn/profiles/myprofile1
-```
diff --git a/website/source/docs/providers/azurerm/r/container_registry.html.markdown b/website/source/docs/providers/azurerm/r/container_registry.html.markdown
deleted file mode 100644
index b71959ce2..000000000
--- a/website/source/docs/providers/azurerm/r/container_registry.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_container_registry"
-sidebar_current: "docs-azurerm-resource-container-registry"
-description: |-
- Create an Azure Container Registry instance.
----
-
-# azurerm\_container\_registry
-
-Create an Azure Container Registry instance.
-
-~> **Note:** All arguments including the access key will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "storageAccount1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "${azurerm_resource_group.test.location}"
- account_type = "Standard_GRS"
-}
-
-resource "azurerm_container_registry" "test" {
- name = "containerRegistry1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "${azurerm_resource_group.test.location}"
- admin_enabled = true
- sku = "Basic"
-
- storage_account {
- name = "${azurerm_storage_account.test.name}"
- access_key = "${azurerm_storage_account.test.primary_access_key}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the Container Registry. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the Container Registry.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `admin_enabled` - (Optional) Specifies whether the admin user is enabled. Defaults to `false`.
-
-* `storage_account` - (Required) A Storage Account block as documented below - which must be located in the same data center as the Container Registry.
-
-* `sku` - (Optional) The SKU name of the container registry. `Basic` is the only acceptable value at this time.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-`storage_account` supports the following:
-
-* `name` - (Required) The name of the storage account, which must be in the same physical location as the Container Registry.
-* `access_key` - (Required) The access key to the storage account.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Container Registry ID.
-
-* `login_server` - The URL that can be used to log into the container registry.
-
-* `admin_username` - The Username associated with the Container Registry Admin account - if the admin account is enabled.
-
-* `admin_password` - The Password associated with the Container Registry Admin account - if the admin account is enabled.
-
-## Import
-
-Container Registries can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_container_registry.test /subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/mygroup1/providers/Microsoft.ContainerRegistry/registries/myregistry1
-```
diff --git a/website/source/docs/providers/azurerm/r/container_service.html.markdown b/website/source/docs/providers/azurerm/r/container_service.html.markdown
deleted file mode 100644
index a83121a09..000000000
--- a/website/source/docs/providers/azurerm/r/container_service.html.markdown
+++ /dev/null
@@ -1,218 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_container_service"
-sidebar_current: "docs-azurerm-resource-container-service"
-description: |-
- Creates an Azure Container Service instance.
----
-
-# azurerm\_container\_service
-
-Creates an Azure Container Service Instance
-
-~> **Note:** All arguments including the client secret will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage (DCOS)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestRG1"
- location = "West US"
-}
-
-resource "azurerm_container_service" "test" {
- name = "acctestcontservice1"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- orchestration_platform = "DCOS"
-
- master_profile {
- count = 1
- dns_prefix = "acctestmaster1"
- }
-
- linux_profile {
- admin_username = "acctestuser1"
-
- ssh_key {
- key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld"
- }
- }
-
- agent_pool_profile {
- name = "default"
- count = 1
- dns_prefix = "acctestagent1"
- vm_size = "Standard_A0"
- }
-
- diagnostics_profile {
- enabled = false
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-
-## Example Usage (Kubernetes)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestRG1"
- location = "West US"
-}
-
-resource "azurerm_container_service" "test" {
- name = "acctestcontservice1"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- orchestration_platform = "Kubernetes"
-
- master_profile {
- count = 1
- dns_prefix = "acctestmaster1"
- }
-
- linux_profile {
- admin_username = "acctestuser1"
-
- ssh_key {
- key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld"
- }
- }
-
- agent_pool_profile {
- name = "default"
- count = 1
- dns_prefix = "acctestagent1"
- vm_size = "Standard_A0"
- }
-
- service_principal {
- client_id = "00000000-0000-0000-0000-000000000000"
- client_secret = "00000000000000000000000000000000"
- }
-
- diagnostics_profile {
- enabled = false
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-
-## Example Usage (Swarm)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestRG1"
- location = "West US"
-}
-
-resource "azurerm_container_service" "test" {
- name = "acctestcontservice1"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- orchestration_platform = "Swarm"
-
- master_profile {
- count = 1
- dns_prefix = "acctestmaster1"
- }
-
- linux_profile {
- admin_username = "acctestuser1"
-
- ssh_key {
- key_data = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqaZoyiz1qbdOQ8xEf6uEu1cCwYowo5FHtsBhqLoDnnp7KUTEBN+L2NxRIfQ781rxV6Iq5jSav6b2Q8z5KiseOlvKA/RF2wqU0UPYqQviQhLmW6THTpmrv/YkUCuzxDpsH7DUDhZcwySLKVVe0Qm3+5N2Ta6UYH3lsDf9R9wTP2K/+vAnflKebuypNlmocIvakFWoZda18FOmsOoIVXQ8HWFNCuw9ZCunMSN62QGamCe3dL5cXlkgHYv7ekJE15IA9aOJcM7e90oeTqo+7HTcWfdu0qQqPWY5ujyMw/llas8tsXY85LFqRnr3gJ02bAscjc477+X+j/gkpFoN1QEmt terraform@demo.tld"
- }
- }
-
- agent_pool_profile {
- name = "default"
- count = 1
- dns_prefix = "acctestagent1"
- vm_size = "Standard_A0"
- }
-
- diagnostics_profile {
- enabled = false
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Container Service instance to create. Changing this forces a new resource to be created.
-
-* `location` - (Required) The location where the Container Service instance should be created. Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `orchestration_platform` - (Required) Specifies the Container Orchestration Platform to use. Currently can be either `DCOS`, `Kubernetes` or `Swarm`. Changing this forces a new resource to be created.
-
-* `master_profile` - (Required) A Master Profile block as documented below.
-
-* `linux_profile` - (Required) A Linux Profile block as documented below.
-
-* `agent_pool_profile` - (Required) One or more Agent Pool Profile's block as documented below.
-
-* `service_principal` - (only Required when you're using `Kubernetes` as an Orchestration Platform) A Service Principal block as documented below.
-
-* `diagnostics_profile` - (Required) A VM Diagnostics Profile block as documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-
-`master_profile` supports the following:
-
-* `count` - (Required) Number of masters (VMs) in the container service cluster. Allowed values are 1, 3, and 5. The default value is 1.
-* `dns_prefix` - (Required) The DNS Prefix to use for the Container Service master nodes.
-
-`linux_profile` supports the following:
-
-* `admin_username` - (Required) The Admin Username for the Cluster.
-* `ssh_key` - (Required) An SSH Key block as documented below.
-
-`ssh_key` supports the following:
-
-* `key_data` - (Required) The Public SSH Key used to access the cluster. The certificate must be in PEM format with or without headers.
-
-`agent_pool_profile` supports the following:
-
-* `name` - (Required) Unique name of the agent pool profile in the context of the subscription and resource group.
-* `count` - (Required) Number of agents (VMs) to host docker containers. Allowed values must be in the range of 1 to 100 (inclusive). The default value is 1.
-* `dns_prefix` - (Required) The DNS Prefix given to Agents in this Agent Pool.
-* `vm_size` - (Required) The VM Size of each of the Agent Pool VM's (e.g. Standard_F1 / Standard_D2v2).
-
-`service_principal` supports the following:
-
-* `client_id` - (Required) The ID for the Service Principal.
-* `client_secret` - (Required) The secret password associated with the service principal.
-
-`diagnostics_profile` supports the following:
-
-* `enabled` - (Required) Should VM Diagnostics be enabled for the Container Service VM's
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Container Service ID.
-
-* `master_profile.fqdn` - FQDN for the master.
-
-* `agent_pool_profile.fqdn` - FQDN for the agent pool.
-
-* `diagnostics_profile.storage_uri` - The URI of the storage account where diagnostics are stored.
diff --git a/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown
deleted file mode 100644
index f1743607d..000000000
--- a/website/source/docs/providers/azurerm/r/dns_a_record.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_a_record"
-sidebar_current: "docs-azurerm-resource-dns-a-record"
-description: |-
- Create a DNS A Record.
----
-
-# azurerm\_dns\_a\_record
-
-Enables you to manage DNS A Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_a_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
- records = ["10.0.180.17"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS A Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `records` - (Required) List of IPv4 Addresses.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS A Record ID.
-
-## Import
-
-A records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_a_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/A/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown
deleted file mode 100644
index 773bc0ef7..000000000
--- a/website/source/docs/providers/azurerm/r/dns_aaaa_record.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_aaaa_record"
-sidebar_current: "docs-azurerm-resource-dns-aaaa-record"
-description: |-
- Create a DNS AAAA Record.
----
-
-# azurerm\_dns\_aaaa\_record
-
-Enables you to manage DNS AAAA Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_aaaa_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
- records = ["2607:f8b0:4009:1803::1005"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS AAAA Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `records` - (Required) List of IPv6 Addresses.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS AAAA Record ID.
-
-## Import
-
-AAAA records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_aaaa_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/AAAA/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown
deleted file mode 100644
index 5e00c2e6b..000000000
--- a/website/source/docs/providers/azurerm/r/dns_cname_record.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_cname_record"
-sidebar_current: "docs-azurerm-resource-dns-cname-record"
-description: |-
- Create a DNS CNAME Record.
----
-
-# azurerm\_dns\_cname\_record
-
-Enables you to manage DNS CNAME Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_cname_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
- record = "contoso.com"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS CNAME Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `record` - (Required) The target of the CNAME.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS CNAME Record ID.
-
-## Import
-
-CNAME records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_cname_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/CNAME/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown
deleted file mode 100644
index 197759215..000000000
--- a/website/source/docs/providers/azurerm/r/dns_mx_record.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_mx_record"
-sidebar_current: "docs-azurerm-resource-dns-mx-record"
-description: |-
- Manage a DNS MX Record.
----
-
-# azurerm\_dns\_mx\_record
-
-Enables you to manage DNS MX Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_mx_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
-
- record {
- preference = 10
- exchange = "mail1.contoso.com"
- }
-
- record {
- preference = 20
- exchange = "mail2.contoso.com"
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS MX Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `record` - (Required) A list of values that make up the MX record. Each `record` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `record` block supports:
-
-* `preference` - (Required) String representing the "preference" value of the MX records. Records with lower preference value take priority.
-
-* `exchange` - (Required) The mail server responsible for the domain covered by the MX record.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS MX Record ID.
-
-## Import
-
-MX records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_mx_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/MX/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown
deleted file mode 100644
index 99d6fd946..000000000
--- a/website/source/docs/providers/azurerm/r/dns_ns_record.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_ns_record"
-sidebar_current: "docs-azurerm-resource-dns-ns-record"
-description: |-
- Create a DNS NS Record.
----
-
-# azurerm\_dns\_ns\_record
-
-Enables you to manage DNS NS Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_ns_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
-
- record {
- nsdname = "ns1.contoso.com"
- }
-
- record {
- nsdname = "ns2.contoso.com"
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS NS Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `record` - (Required) A list of values that make up the NS record. Each `record` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `record` block supports:
-
-* `nsdname` - (Required) The value of the record.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS NS Record ID.
-
-## Import
-
-NS records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_ns_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/NS/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown
deleted file mode 100644
index 34b5719b2..000000000
--- a/website/source/docs/providers/azurerm/r/dns_srv_record.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_srv_record"
-sidebar_current: "docs-azurerm-resource-dns-srv-record"
-description: |-
- Manage a DNS SRV Record.
----
-
-# azurerm\_dns\_srv\_record
-
-Enables you to manage DNS SRV Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_srv_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
-
- record {
- priority = 1
- weight = 5
- port = 8080
- target = "target1.contoso.com"
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS SRV Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `record` - (Required) A list of values that make up the SRV record. Each `record` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `record` block supports:
-
-* `priority` - (Required) Priority of the SRV record.
-
-* `weight` - (Required) Weight of the SRV record.
-
-* `port` - (Required) Port the service is listening on.
-
-* `target` - (Required) FQDN of the service.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS SRV Record ID.
-
-## Import
-
-SRV records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_srv_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/SRV/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown b/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown
deleted file mode 100644
index 14299be53..000000000
--- a/website/source/docs/providers/azurerm/r/dns_txt_record.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_txt_record"
-sidebar_current: "docs-azurerm-resource-dns-txt-record"
-description: |-
- Create a DNS TXT Record.
----
-
-# azurerm\_dns\_txt\_record
-
-Enables you to manage DNS TXT Records within Azure DNS.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_dns_txt_record" "test" {
- name = "test"
- zone_name = "${azurerm_dns_zone.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- ttl = "300"
-
- record {
- value = "google-site-authenticator"
- }
-
- record {
- value = "more site information here"
- }
-
- tags {
- Environment = "Production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS TXT Record.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `zone_name` - (Required) Specifies the DNS Zone where the resource exists. Changing this forces a new resource to be created.
-
-* `ttl` - (Required) The Time To Live (TTL) of the DNS record.
-
-* `record` - (Required) A list of values that make up the txt record. Each `record` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `record` block supports:
-
-* `value` - (Required) The value of the record.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS TXT Record ID.
-
-## Import
-
-TXT records can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_txt_record.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1/TXT/myrecord1
-```
diff --git a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown b/website/source/docs/providers/azurerm/r/dns_zone.html.markdown
deleted file mode 100644
index 7a896c8aa..000000000
--- a/website/source/docs/providers/azurerm/r/dns_zone.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_dns_zone"
-sidebar_current: "docs-azurerm-resource-dns-zone"
-description: |-
- Create a DNS Zone.
----
-
-# azurerm\_dns\_zone
-
-Enables you to manage DNS zones within Azure DNS. These zones are hosted on Azure's name servers to which you can delegate the zone from the parent domain.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_dns_zone" "test" {
- name = "mydomain.com"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the DNS Zone. Must be a valid domain name.
-
-* `resource_group_name` - (Required) Specifies the resource group where the resource exists. Changing this forces a new resource to be created.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The DNS Zone ID.
-* `max_number_of_record_sets` - (Optional) Maximum number of Records in the zone. Defaults to `1000`.
-* `number_of_record_sets` - (Optional) The number of records already in the zone.
-* `name_servers` - (Optional) A list of values that make up the NS record for the zone.
-
-
-## Import
-
-DNS Zones can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_dns_zone.zone1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/dnsZones/zone1
-```
diff --git a/website/source/docs/providers/azurerm/r/eventhub.html.markdown b/website/source/docs/providers/azurerm/r/eventhub.html.markdown
deleted file mode 100644
index 8392fee44..000000000
--- a/website/source/docs/providers/azurerm/r/eventhub.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_eventhub"
-sidebar_current: "docs-azurerm-resource-eventhub"
-description: |-
- Creates a new Event Hub as a nested resource within an Event Hub Namespace.
----
-
-# azurerm\_eventhub
-
-Creates a new Event Hub as a nested resource within an Event Hub Namespace.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_eventhub_namespace" "test" {
- name = "acceptanceTestEventHubNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Basic"
- capacity = 2
-
- tags {
- environment = "Production"
- }
-}
-
-resource "azurerm_eventhub" "test" {
- name = "acceptanceTestEventHub"
- namespace_name = "${azurerm_eventhub_namespace.test.name}"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- partition_count = 2
- message_retention = 2
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the EventHub resource. Changing this forces a new resource to be created.
-
-* `namespace_name` - (Required) Specifies the name of the EventHub Namespace. Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which the EventHub's parent Namespace exists. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `partition_count` - (Required) Specifies the current number of shards on the Event Hub.
-
-* `message_retention` - (Required) Specifies the number of days to retain the events for this Event Hub. Needs to be between 1 and 7 days; or 1 day when using a Basic SKU for the parent EventHub Namespace.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EventHub ID.
-
-* `partition_ids` - The identifiers for partitions created for Event Hubs.
-
-## Import
-
-EventHubs can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_eventhub.eventhub1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/eventhubs/eventhub1
-```
diff --git a/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown
deleted file mode 100644
index 08faea08d..000000000
--- a/website/source/docs/providers/azurerm/r/eventhub_authorization_rule.html.markdown
+++ /dev/null
@@ -1,96 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_eventhub_authorization_rule"
-sidebar_current: "docs-azurerm-resource-eventhub-authorization-rule"
-description: |-
- Creates a new Event Hub Authorization Rule within an Event Hub.
----
-
-# azurerm\_eventhub\_authorization\_rule
-
-Creates a new Event Hub Authorization Rule within an Event Hub.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_eventhub_namespace" "test" {
- name = "acceptanceTestEventHubNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Basic"
- capacity = 2
-
- tags {
- environment = "Production"
- }
-}
-
-resource "azurerm_eventhub" "test" {
- name = "acceptanceTestEventHub"
- namespace_name = "${azurerm_eventhub_namespace.test.name}"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- partition_count = 2
- message_retention = 2
-}
-
-resource "azurerm_eventhub_authorization_rule" "test" {
- name = "navi"
- namespace_name = "${azurerm_eventhub_namespace.test.name}"
- eventhub_name = "${azurerm_eventhub.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "${azurerm_resource_group.test.location}"
- listen = true
- send = false
- manage = false
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the EventHub Authorization Rule resource. Changing this forces a new resource to be created.
-
-* `namespace_name` - (Required) Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.
-
-* `eventhub_name` - (Required) Specifies the name of the EventHub. Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which the EventHub Namespace exists. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-~> **NOTE** At least one of the 3 permissions below needs to be set.
-
-* `listen` - (Optional) Does this Authorization Rule have permissions to Listen to the Event Hub? Defaults to `false`.
-
-* `send` - (Optional) Does this Authorization Rule have permissions to Send to the Event Hub? Defaults to `false`.
-
-* `manage` - (Optional) Does this Authorization Rule have permissions to Manage the Event Hub? When this property is `true` - both `listen` and `send` must be too. Defaults to `false`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EventHub ID.
-
-* `primary_key` - The Primary Key for the Event Hub Authorization Rule.
-
-* `primary_connection_string` - The Primary Connection String for the Event Hub Authorization Rule.
-
-* `secondary_key` - The Secondary Key for the Event Hub Authorization Rule.
-
-* `secondary_connection_string` - The Secondary Connection String for the Event Hub Authorization Rule.
-
-## Import
-
-EventHubs can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_eventhub_authorization_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/eventhubs/eventhub1/authorizationRules/rule1
-```
diff --git a/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown
deleted file mode 100644
index 68758c263..000000000
--- a/website/source/docs/providers/azurerm/r/eventhub_consumer_group.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_eventhub_consumer_group"
-sidebar_current: "docs-azurerm-resource-eventhub-consumer-group"
-description: |-
- Creates a new Event Hub Consumer Group as a nested resource within an Event Hub.
----
-
-# azurerm\_eventhub\_consumer\_group
-
-Creates a new Event Hub Consumer Group as a nested resource within an Event Hub.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_eventhub_namespace" "test" {
- name = "acceptanceTestEventHubNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Basic"
- capacity = 2
-
- tags {
- environment = "Production"
- }
-}
-
-resource "azurerm_eventhub" "test" {
- name = "acceptanceTestEventHub"
- namespace_name = "${azurerm_eventhub_namespace.test.name}"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- partition_count = 2
- message_retention = 2
-}
-
-resource "azurerm_eventhub_consumer_group" "test" {
- name = "acceptanceTestEventHubConsumerGroup"
- namespace_name = "${azurerm_eventhub_namespace.test.name}"
- eventhub_name = "${azurerm_eventhub.test.name}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "${azurerm_resource_group.test.location}"
- user_metadata = "some-meta-data"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the EventHub Consumer Group resource. Changing this forces a new resource to be created.
-
-* `namespace_name` - (Required) Specifies the name of the grandparent EventHub Namespace. Changing this forces a new resource to be created.
-
-* `eventhub_name` - (Required) Specifies the name of the EventHub. Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which the EventHub Consumer Group's grandparent Namespace exists. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `user_metadata` - (Optional) Specifies the user metadata.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EventHub Consumer Group ID.
-
-## Import
-
-EventHub Consumer Groups can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_eventhub_consumer_group.consumerGroup1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1/eventhubs/eventhub1/consumergroups/consumerGroup1
-```
diff --git a/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown b/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown
deleted file mode 100644
index 0d69bdba3..000000000
--- a/website/source/docs/providers/azurerm/r/eventhub_namespace.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_eventhub_namespace"
-sidebar_current: "docs-azurerm-resource-eventhub-namespace"
-description: |-
- Create an EventHub Namespace.
----
-
-# azurerm\_eventhub\_namespace
-
-Create an EventHub Namespace.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_eventhub_namespace" "test" {
- name = "acceptanceTestEventHubNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "Basic"
- capacity = 2
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the EventHub Namespace resource. Changing this forces a
-  new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `sku` - (Required) Defines which tier to use. Options are Basic or Standard.
-
-* `capacity` - (Optional) Specifies the capacity of a Standard namespace. Can be 1, 2 or 4.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The EventHub Namespace ID.
-
-The following attributes are exported only if there is an authorization rule named
-`RootManageSharedAccessKey` which is created automatically by Azure.
-
-* `default_primary_connection_string` - The primary connection string for the authorization
- rule `RootManageSharedAccessKey`.
-
-* `default_secondary_connection_string` - The secondary connection string for the
- authorization rule `RootManageSharedAccessKey`.
-
-* `default_primary_key` - The primary access key for the authorization rule `RootManageSharedAccessKey`.
-
-* `default_secondary_key` - The secondary access key for the authorization rule `RootManageSharedAccessKey`.
-
-## Import
-
-EventHub Namespaces can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_eventhub_namespace.namespace1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.EventHub/namespaces/namespace1
-```
diff --git a/website/source/docs/providers/azurerm/r/express_route_circuit.html.markdown b/website/source/docs/providers/azurerm/r/express_route_circuit.html.markdown
deleted file mode 100644
index fb9a159c2..000000000
--- a/website/source/docs/providers/azurerm/r/express_route_circuit.html.markdown
+++ /dev/null
@@ -1,89 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_express_route_circuit"
-sidebar_current: "docs-azurerm-resource-express-route-circuit"
-description: |-
- Creates an ExpressRoute circuit.
----
-
-# azurerm\_express\_route\_circuit
-
-Creates an ExpressRoute circuit.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "exprtTest"
- location = "West US"
-}
-
-resource "azurerm_express_route_circuit" "test" {
- name = "expressRoute1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
- service_provider_name = "Equinix"
- peering_location = "Silicon Valley"
- bandwidth_in_mbps = 50
- sku {
- tier = "Standard"
- family = "MeteredData"
- }
- allow_classic_operations = false
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ExpressRoute circuit. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists.
- Changing this forces a new resource to be created.
-
-* `service_provider_name` - (Required) The name of the ExpressRoute Service Provider.
-
-* `peering_location` - (Required) The name of the peering location and not the ARM resource location.
-
-* `bandwidth_in_mbps` - (Required) The bandwidth in Mbps of the circuit being created. Once you increase your bandwidth,
- you will not be able to decrease it to its previous value.
-
-* `sku` - (Required) Chosen SKU of ExpressRoute circuit as documented below.
-
-* `allow_classic_operations` - (Optional) Allow the circuit to interact with classic (RDFE) resources.
- The default value is false.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-`sku` supports the following:
-
-* `tier` - (Required) The service tier. Value must be either "Premium" or "Standard".
-
-* `family` - (Required) The billing mode. Value must be either "MeteredData" or "UnlimitedData".
- Once you set the billing model to "UnlimitedData", you will not be able to switch to "MeteredData".
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Resource ID of the ExpressRoute circuit.
-* `service_provider_provisioning_state` - The ExpressRoute circuit provisioning state from your chosen service provider.
- Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
-* `service_key` - The string needed by the service provider to provision the ExpressRoute circuit.
-
-## Import
-
-ExpressRoute circuits can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_express_route_circuit.myExpressRoute /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/expressRouteCircuits/myExpressRoute
-```
diff --git a/website/source/docs/providers/azurerm/r/key_vault.html.markdown b/website/source/docs/providers/azurerm/r/key_vault.html.markdown
deleted file mode 100644
index 1e869ab7f..000000000
--- a/website/source/docs/providers/azurerm/r/key_vault.html.markdown
+++ /dev/null
@@ -1,115 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_key_vault"
-sidebar_current: "docs-azurerm-resource-key-vault"
-description: |-
- Create a Key Vault.
----
-
-# azurerm\_key\_vault
-
-Create a Key Vault.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_key_vault" "test" {
- name = "testvault"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- sku {
- name = "standard"
- }
-
- tenant_id = "d6e396d0-5584-41dc-9fc0-268df99bc610"
-
- access_policy {
- tenant_id = "d6e396d0-5584-41dc-9fc0-268df99bc610"
- object_id = "d746815a-0433-4a21-b95d-fc437d2d475b"
-
- key_permissions = [
- "all",
- ]
-
- secret_permissions = [
- "get",
- ]
- }
-
- enabled_for_disk_encryption = true
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the Key Vault resource. Changing this
- forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists.
- Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace. Changing this forces a new resource to be created.
-
-* `sku` - (Required) An SKU block as described below.
-
-* `tenant_id` - (Required) The Azure Active Directory tenant ID that should be
- used for authenticating requests to the key vault.
-
-* `access_policy` - (Required) An access policy block as described below. At least
- one policy is required up to a maximum of 16.
-
-* `enabled_for_deployment` - (Optional) Boolean flag to specify whether Azure Virtual
- Machines are permitted to retrieve certificates stored as secrets from the key
- vault. Defaults to false.
-
-* `enabled_for_disk_encryption` - (Optional) Boolean flag to specify whether Azure
- Disk Encryption is permitted to retrieve secrets from the vault and unwrap keys.
- Defaults to false.
-
-* `enabled_for_template_deployment` - (Optional) Boolean flag to specify whether
- Azure Resource Manager is permitted to retrieve secrets from the key vault.
- Defaults to false.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-`sku` supports the following:
-
-* `name` - (Required) SKU name to specify whether the key vault is a `standard`
- or `premium` vault.
-
-`access_policy` supports the following:
-
-* `tenant_id` - (Required) The Azure Active Directory tenant ID that should be used
- for authenticating requests to the key vault. Must match the `tenant_id` used
- above.
-
-* `object_id` - (Required) The object ID of a user, service principal or security
- group in the Azure Active Directory tenant for the vault. The object ID must
- be unique for the list of access policies.
-
-* `key_permissions` - (Required) List of key permissions, must be one or more from
- the following: `all`, `backup`, `create`, `decrypt`, `delete`, `encrypt`, `get`,
- `import`, `list`, `restore`, `sign`, `unwrapKey`, `update`, `verify`, `wrapKey`.
-
-* `secret_permissions` - (Required) List of secret permissions, must be one or more
- from the following: `all`, `delete`, `get`, `list`, `set`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Vault ID.
-* `vault_uri` - The URI of the vault for performing operations on keys and secrets.
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown
deleted file mode 100644
index 7753a5ca5..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb"
-sidebar_current: "docs-azurerm-resource-loadbalancer"
-description: |-
- Create a LoadBalancer Resource.
----
-
-# azurerm\_lb
-
-Create a LoadBalancer Resource.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the LoadBalancer.
-* `resource_group_name` - (Required) The name of the resource group in which to create the LoadBalancer.
-* `location` - (Required) Specifies the supported Azure location where the resource exists.
-* `frontend_ip_configuration` - (Optional) A frontend ip configuration block as documented below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-`frontend_ip_configuration` supports the following:
-
-* `name` - (Required) Specifies the name of the frontend ip configuration.
-* `subnet_id` - (Optional) Reference to subnet associated with the IP Configuration.
-* `private_ip_address` - (Optional) Private IP Address to assign to the Load Balancer. The last one and first four IPs in any range are reserved and cannot be manually assigned.
-* `private_ip_address_allocation` - (Optional) Defines how a private IP address is assigned. Options are Static or Dynamic.
-* `public_ip_address_id` - (Optional) Reference to Public IP address to be associated with the Load Balancer.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The LoadBalancer ID.
-* `private_ip_address` - The private IP address assigned to the load balancer, if any.
-
-## Import
-
-Load Balancers can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1
-```
-
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown
deleted file mode 100644
index 307fc29ad..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer_backend_address_pool.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb_backend_address_pool"
-sidebar_current: "docs-azurerm-resource-loadbalancer-backend-address-pool"
-description: |-
- Create a LoadBalancer Backend Address Pool.
----
-
-# azurerm\_lb\_backend\_address\_pool
-
-Create a LoadBalancer Backend Address Pool.
-
-~> **NOTE:** When using this resource, the LoadBalancer needs to have a FrontEnd IP Configuration attached.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_backend_address_pool" "test" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "BackEndAddressPool"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the Backend Address Pool.
-* `resource_group_name` - (Required) The name of the resource group in which to create the resource.
-* `loadbalancer_id` - (Required) The ID of the LoadBalancer in which to create the Backend Address Pool.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the LoadBalancer to which the resource is attached.
-
-## Import
-
-Load Balancer Backend Address Pools can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb_backend_address_pool.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/backendAddressPools/pool1
-```
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown
deleted file mode 100644
index 6e34d5a76..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer_nat_pool.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb_nat_pool"
-sidebar_current: "docs-azurerm-resource-loadbalancer-nat-pool"
-description: |-
- Create a LoadBalancer NAT Pool.
----
-
-# azurerm\_lb\_nat\_pool
-
-Create a LoadBalancer NAT pool.
-
-~> **NOTE:** When using this resource, the LoadBalancer needs to have a FrontEnd IP Configuration attached.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_nat_pool" "test" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "SampleApplication Pool"
- protocol = "Tcp"
- frontend_port_start = 80
- frontend_port_end = 81
- backend_port = 8080
- frontend_ip_configuration_name = "PublicIPAddress"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the NAT pool.
-* `resource_group_name` - (Required) The name of the resource group in which to create the resource.
-* `loadbalancer_id` - (Required) The ID of the LoadBalancer in which to create the NAT pool.
-* `frontend_ip_configuration_name` - (Required) The name of the frontend IP configuration exposing this rule.
-* `protocol` - (Required) The transport protocol for the external endpoint. Possible values are `Udp` or `Tcp`.
-* `frontend_port_start` - (Required) The first port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with this Load Balancer. Possible values range between 1 and 65534, inclusive.
-* `frontend_port_end` - (Required) The last port number in the range of external ports that will be used to provide Inbound Nat to NICs associated with this Load Balancer. Possible values range between 1 and 65534, inclusive.
-* `backend_port` - (Required) The port used for the internal endpoint. Possible values range between 1 and 65535, inclusive.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the LoadBalancer to which the resource is attached.
-
-## Import
-
-Load Balancer NAT Pools can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb_nat_pool.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/inboundNatPools/pool1
-```
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown
deleted file mode 100644
index 49f87f58e..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer_nat_rule.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb_nat_rule"
-sidebar_current: "docs-azurerm-resource-loadbalancer-nat-rule"
-description: |-
- Create a LoadBalancer NAT Rule.
----
-
-# azurerm\_lb\_nat\_rule
-
-Create a LoadBalancer NAT Rule.
-
-~> **NOTE:** When using this resource, the LoadBalancer needs to have a FrontEnd IP Configuration attached.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_nat_rule" "test" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "RDP Access"
- protocol = "Tcp"
- frontend_port = 3389
- backend_port = 3389
- frontend_ip_configuration_name = "PublicIPAddress"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the NAT Rule.
-* `resource_group_name` - (Required) The name of the resource group in which to create the resource.
-* `loadbalancer_id` - (Required) The ID of the LoadBalancer in which to create the NAT Rule.
-* `frontend_ip_configuration_name` - (Required) The name of the frontend IP configuration exposing this rule.
-* `protocol` - (Required) The transport protocol for the external endpoint. Possible values are `Udp` or `Tcp`.
-* `frontend_port` - (Required) The port for the external endpoint. Port numbers for each Rule must be unique within the Load Balancer. Possible values range between 1 and 65534, inclusive.
-* `backend_port` - (Required) The port used for internal connections on the endpoint. Possible values range between 1 and 65535, inclusive.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the LoadBalancer to which the resource is attached.
-
-## Import
-
-Load Balancer NAT Rules can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb_nat_rule.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/inboundNatRules/rule1
-```
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown
deleted file mode 100644
index 05339951e..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer_probe.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb_probe"
-sidebar_current: "docs-azurerm-resource-loadbalancer-probe"
-description: |-
- Create a LoadBalancer Probe Resource.
----
-
-# azurerm\_lb\_probe
-
-Create a LoadBalancer Probe Resource.
-
-~> **NOTE:** When using this resource, the LoadBalancer needs to have a FrontEnd IP Configuration attached.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_probe" "test" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "ssh-running-probe"
- port = 22
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the Probe.
-* `resource_group_name` - (Required) The name of the resource group in which to create the resource.
-* `loadbalancer_id` - (Required) The ID of the LoadBalancer in which to create the NAT Rule.
-* `protocol` - (Optional) Specifies the protocol of the end point. Possible values are `Http` or `Tcp`. If Tcp is specified, a received ACK is required for the probe to be successful. If Http is specified, a 200 OK response from the specified URI is required for the probe to be successful.
-* `port` - (Required) Port on which the Probe queries the backend endpoint. Possible values range from 1 to 65535, inclusive.
-* `request_path` - (Optional) The URI used for requesting health status from the backend endpoint. Required if protocol is set to Http. Otherwise, it is not allowed.
-* `interval_in_seconds` - (Optional) The interval, in seconds between probes to the backend endpoint for health status. The default value is 15, the minimum value is 5.
-* `number_of_probes` - (Optional) The number of failed probe attempts after which the backend endpoint is removed from rotation. The default value is 2. NumberOfProbes multiplied by intervalInSeconds value must be greater or equal to 10. Endpoints are returned to rotation when at least one probe is successful.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the LoadBalancer to which the resource is attached.
-
-## Import
-
-Load Balancer Probes can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb_probe.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/probes/probe1
-```
diff --git a/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown b/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown
deleted file mode 100644
index cd8cf97b4..000000000
--- a/website/source/docs/providers/azurerm/r/loadbalancer_rule.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_lb_rule"
-sidebar_current: "docs-azurerm-resource-loadbalancer-rule"
-description: |-
- Create a LoadBalancer Rule.
----
-
-# azurerm\_lb\_rule
-
-Create a LoadBalancer Rule.
-
-~> **NOTE:** When using this resource, the LoadBalancer needs to have a FrontEnd IP Configuration attached.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "LoadBalancerRG"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "PublicIPForLB"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-}
-
-resource "azurerm_lb" "test" {
- name = "TestLoadBalancer"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_rule" "test" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "LBRule"
- protocol = "Tcp"
- frontend_port = 3389
- backend_port = 3389
- frontend_ip_configuration_name = "PublicIPAddress"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the LB Rule.
-* `resource_group_name` - (Required) The name of the resource group in which to create the resource.
-* `loadbalancer_id` - (Required) The ID of the LoadBalancer in which to create the Rule.
-* `frontend_ip_configuration_name` - (Required) The name of the frontend IP configuration to which the rule is associated.
-* `protocol` - (Required) The transport protocol for the external endpoint. Possible values are `Udp` or `Tcp`.
-* `frontend_port` - (Required) The port for the external endpoint. Port numbers for each Rule must be unique within the Load Balancer. Possible values range between 1 and 65534, inclusive.
-* `backend_port` - (Required) The port used for internal connections on the endpoint. Possible values range between 1 and 65535, inclusive.
-* `backend_address_pool_id` - (Optional) A reference to a Backend Address Pool over which this Load Balancing Rule operates.
-* `probe_id` - (Optional) A reference to a Probe used by this Load Balancing Rule.
-* `enable_floating_ip` - (Optional) Floating IP is pertinent to failover scenarios: a "floating" IP is reassigned to a secondary server in case the primary server fails. Floating IP is required for SQL AlwaysOn.
-* `idle_timeout_in_minutes` - (Optional) Specifies the timeout for the Tcp idle connection. The value can be set between 4 and 30 minutes. The default value is 4 minutes. This element is only used when the protocol is set to Tcp.
-* `load_distribution` - (Optional) Specifies the load balancing distribution type to be used by the Load Balancer. Possible values are: Default – The load balancer is configured to use a 5 tuple hash to map traffic to available servers. SourceIP – The load balancer is configured to use a 2 tuple hash to map traffic to available servers. SourceIPProtocol – The load balancer is configured to use a 3 tuple hash to map traffic to available servers.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the LoadBalancer to which the resource is attached.
-
-## Import
-
-Load Balancer Rules can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_lb_rule.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.Network/loadBalancers/lb1/loadBalancingRules/rule1
-```
diff --git a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown b/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown
deleted file mode 100644
index b70e99741..000000000
--- a/website/source/docs/providers/azurerm/r/local_network_gateway.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_local_network_gateway"
-sidebar_current: "docs-azurerm-resource-local-network-gateway"
-description: |-
- Creates a new local network gateway connection over which specific connections can be configured.
----
-
-# azurerm\_local\_network\_gateway
-
-Creates a new local network gateway connection over which specific connections can be configured.
-
-## Example Usage
-
-```hcl
-resource "azurerm_local_network_gateway" "home" {
- name = "backHome"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "${azurerm_resource_group.test.location}"
- gateway_address = "12.13.14.15"
- address_space = ["10.0.0.0/16"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the local network gateway. Changing this
- forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the local network gateway.
-
-* `location` - (Required) The location/region where the local network gateway is
-  created. Changing this forces a new resource to be created.
-
-* `gateway_address` - (Required) The IP address of the gateway to which to
- connect.
-
-* `address_space` - (Required) The list of string CIDRs representing the
- address spaces the gateway exposes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The local network gateway unique ID within Azure.
-
-## Import
-
-Local Network Gateways can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_local_network_gateway.lng1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/localNetworkGateways/lng1
-```
diff --git a/website/source/docs/providers/azurerm/r/managed_disk.html.markdown b/website/source/docs/providers/azurerm/r/managed_disk.html.markdown
deleted file mode 100644
index 1337d94c1..000000000
--- a/website/source/docs/providers/azurerm/r/managed_disk.html.markdown
+++ /dev/null
@@ -1,110 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_managed_disk"
-sidebar_current: "docs-azurerm-resource-managed-disk"
-description: |-
- Create a Managed Disk.
----
-
-# azurerm\_managed\_disk
-
-Create a managed disk.
-
-## Example Usage with Create Empty
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US 2"
-}
-
-resource "azurerm_managed_disk" "test" {
- name = "acctestmd"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_type = "Standard_LRS"
- create_option = "Empty"
- disk_size_gb = "1"
-
- tags {
- environment = "staging"
- }
-}
-```
-
-## Example Usage with Create Copy
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US 2"
-}
-
-resource "azurerm_managed_disk" "source" {
- name = "acctestmd1"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_type = "Standard_LRS"
- create_option = "Empty"
- disk_size_gb = "1"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_managed_disk" "copy" {
- name = "acctestmd2"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_type = "Standard_LRS"
- create_option = "Copy"
- source_resource_id = "${azurerm_managed_disk.source.id}"
- disk_size_gb = "1"
-
- tags {
- environment = "staging"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the managed disk. Changing this forces a
- new resource to be created.
-* `resource_group_name` - (Required) The name of the resource group in which to create
- the managed disk.
-* `location` - (Required) Specified the supported Azure location where the resource exists.
- Changing this forces a new resource to be created.
-* `storage_account_type` - (Required) The type of storage to use for the managed disk.
- Allowable values are `Standard_LRS` or `Premium_LRS`.
-* `create_option` - (Required) The method to use when creating the managed disk.
- * `Import` - Import a VHD file in to the managed disk (VHD specified with `source_uri`).
- * `Empty` - Create an empty managed disk.
- * `Copy` - Copy an existing managed disk or snapshot (specified with `source_resource_id`).
-* `source_uri` - (Optional) URI to a valid VHD file to be used when `create_option` is `Import`.
-* `source_resource_id` - (Optional) ID of an existing managed disk to copy when `create_option` is `Copy`.
-* `os_type` - (Optional) Specify a value when the source of an `Import` or `Copy`
- operation targets a source that contains an operating system. Valid values are `Linux` or `Windows`
-* `disk_size_gb` - (Required) Specifies the size of the managed disk to create in gigabytes.
- If `create_option` is `Copy`, then the value must be equal to or greater than the source's size.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-For more information on managed disks, such as sizing options and pricing, please check out the
-[azure documentation](https://docs.microsoft.com/en-us/azure/storage/storage-managed-disks-overview).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The managed disk ID.
-
-## Import
-
-Managed Disks can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_managed_disk.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.compute/disks/manageddisk1
-```
diff --git a/website/source/docs/providers/azurerm/r/network_interface.html.markdown b/website/source/docs/providers/azurerm/r/network_interface.html.markdown
deleted file mode 100644
index 82e925ac5..000000000
--- a/website/source/docs/providers/azurerm/r/network_interface.html.markdown
+++ /dev/null
@@ -1,111 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_network_interface"
-sidebar_current: "docs-azurerm-resource-network-interface"
-description: |-
- Manages the Network Interface cards that link the Virtual Machines and Virtual Network.
----
-
-# azurerm\_network\_interface
-
-Network interface cards are virtual network cards that form the link between virtual machines and the virtual network
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acceptanceTestVirtualNetwork1"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "testsubnet"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_network_interface" "test" {
- name = "acceptanceTestNetworkInterface1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- ip_configuration {
- name = "testconfiguration1"
- subnet_id = "${azurerm_subnet.test.id}"
- private_ip_address_allocation = "dynamic"
- }
-
- tags {
- environment = "staging"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the network interface. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the network interface.
-
-* `location` - (Required) The location/region where the network interface is
- created. Changing this forces a new resource to be created.
-
-* `network_security_group_id` - (Optional) The ID of the Network Security Group to associate with
- the network interface.
-
-* `internal_dns_name_label` - (Optional) Relative DNS name for this NIC used for internal communications between VMs in the same VNet
-
-* `enable_ip_forwarding` - (Optional) Enables IP Forwarding on the NIC. Defaults to `false`.
-
-* `dns_servers` - (Optional) List of DNS servers IP addresses to use for this NIC, overrides the VNet-level server list
-
-* `ip_configuration` - (Required) Collection of ipConfigurations associated with this NIC. Each `ip_configuration` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `ip_configuration` block supports:
-
-* `name` - (Required) User-defined name of the IP.
-
-* `subnet_id` - (Required) Reference to a subnet in which this NIC has been created.
-
-* `private_ip_address` - (Optional) Static IP Address.
-
-* `private_ip_address_allocation` - (Required) Defines how a private IP address is assigned. Options are Static or Dynamic.
-
-* `public_ip_address_id` - (Optional) Reference to a Public IP Address to associate with this NIC
-
-* `load_balancer_backend_address_pools_ids` - (Optional) List of Load Balancer Backend Address Pool IDs references to which this NIC belongs
-
-* `load_balancer_inbound_nat_rules_ids` - (Optional) List of Load Balancer Inbound Nat Rules IDs involving this NIC
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual NetworkConfiguration ID.
-* `mac_address` - The media access control (MAC) address of the network interface.
-* `private_ip_address` - The private ip address of the network interface.
-* `virtual_machine_id` - Reference to a VM with which this NIC has been associated.
-* `applied_dns_servers` - If the VM that uses this NIC is part of an Availability Set, then this list will have the union of all DNS servers from all NICs that are part of the Availability Set
-* `internal_fqdn` - Fully qualified DNS name supporting internal communications between VMs in the same VNet
-
-## Import
-
-Network Interfaces can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_network_interface.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.network/networkInterfaces/nic1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown b/website/source/docs/providers/azurerm/r/network_security_group.html.markdown
deleted file mode 100644
index 1e49cfc0c..000000000
--- a/website/source/docs/providers/azurerm/r/network_security_group.html.markdown
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_network_security_group"
-sidebar_current: "docs-azurerm-resource-network-security-group"
-description: |-
- Create a network security group that contains a list of network security rules. Network security groups enable inbound or outbound traffic to be enabled or denied.
----
-
-# azurerm\_network\_security\_group
-
-Create a network security group that contains a list of network security rules.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_network_security_group" "test" {
- name = "acceptanceTestSecurityGroup1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- security_rule {
- name = "test123"
- priority = 100
- direction = "Inbound"
- access = "Allow"
- protocol = "Tcp"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- }
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the network security group. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the network security group.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `security_rule` - (Optional) Can be specified multiple times to define multiple
- security rules. Each `security_rule` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-
-The `security_rule` block supports:
-
-* `name` - (Required) The name of the security rule.
-
-* `description` - (Optional) A description for this rule. Restricted to 140 characters.
-
-* `protocol` - (Required) Network protocol this rule applies to. Can be Tcp, Udp or * to match both.
-
-* `source_port_range` - (Required) Source Port or Range. Integer or range between 0 and 65535 or * to match any.
-
-* `destination_port_range` - (Required) Destination Port or Range. Integer or range between 0 and 65535 or * to match any.
-
-* `source_address_prefix` - (Required) CIDR or source IP range or * to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used.
-
-* `destination_address_prefix` - (Required) CIDR or destination IP range or * to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used.
-
-* `access` - (Required) Specifies whether network traffic is allowed or denied. Possible values are "Allow" and "Deny".
-
-* `priority` - (Required) Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
-
-* `direction` - (Required) The direction specifies if the rule will be evaluated on incoming or outgoing traffic. Possible values are "Inbound" and "Outbound".
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Network Security Group ID.
-
-
-## Import
-
-Network Security Groups can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_network_security_group.group1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup
-```
diff --git a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown b/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown
deleted file mode 100644
index c30506706..000000000
--- a/website/source/docs/providers/azurerm/r/network_security_rule.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_network_security_rule"
-sidebar_current: "docs-azurerm-resource-network-security-rule"
-description: |-
- Create a Network Security Rule.
----
-
-# azurerm\_network\_security\_rule
-
-Create a Network Security Rule.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_network_security_group" "test" {
- name = "acceptanceTestSecurityGroup1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_network_security_rule" "test" {
- name = "test123"
- priority = 100
- direction = "Outbound"
- access = "Allow"
- protocol = "Tcp"
- source_port_range = "*"
- destination_port_range = "*"
- source_address_prefix = "*"
- destination_address_prefix = "*"
- resource_group_name = "${azurerm_resource_group.test.name}"
- network_security_group_name = "${azurerm_network_security_group.test.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the security rule.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the Network Security Rule.
-
-* `network_security_group_name` - (Required) The name of the Network Security Group that we want to attach the rule to.
-
-* `description` - (Optional) A description for this rule. Restricted to 140 characters.
-
-* `protocol` - (Required) Network protocol this rule applies to. Can be Tcp, Udp or * to match both.
-
-* `source_port_range` - (Required) Source Port or Range. Integer or range between 0 and 65535 or * to match any.
-
-* `destination_port_range` - (Required) Destination Port or Range. Integer or range between 0 and 65535 or * to match any.
-
-* `source_address_prefix` - (Required) CIDR or source IP range or * to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used.
-
-* `destination_address_prefix` - (Required) CIDR or destination IP range or * to match any IP. Tags such as `VirtualNetwork`, `AzureLoadBalancer` and `Internet` can also be used.
-
-* `access` - (Required) Specifies whether network traffic is allowed or denied. Possible values are "Allow" and "Deny".
-
-* `priority` - (Required) Specifies the priority of the rule. The value can be between 100 and 4096. The priority number must be unique for each rule in the collection. The lower the priority number, the higher the priority of the rule.
-
-* `direction` - (Required) The direction specifies if the rule will be evaluated on incoming or outgoing traffic. Possible values are "Inbound" and "Outbound".
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Network Security Rule ID.
-
-
-## Import
-
-Network Security Rules can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_network_security_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/networkSecurityGroups/mySecurityGroup/securityRules/rule1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/r/public_ip.html.markdown b/website/source/docs/providers/azurerm/r/public_ip.html.markdown
deleted file mode 100644
index e83df9321..000000000
--- a/website/source/docs/providers/azurerm/r/public_ip.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_public_ip"
-sidebar_current: "docs-azurerm-resource-network-public-ip"
-description: |-
- Create a Public IP Address.
----
-
-# azurerm\_public\_ip
-
-Create a Public IP Address.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "acceptanceTestPublicIp1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the Public IP resource. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the public ip.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `public_ip_address_allocation` - (Required) Defines whether the IP address is stable or dynamic. Options are Static or Dynamic.
-
-* `idle_timeout_in_minutes` - (Optional) Specifies the timeout for the TCP idle connection. The value can be set between 4 and 30 minutes.
-
-* `domain_name_label` - (Optional) Label for the Domain Name. Will be used to make up the FQDN. If a domain name label is specified, an A DNS record is created for the public IP in the Microsoft Azure DNS system.
-
-* `reverse_fqdn` - (Optional) A fully qualified domain name that resolves to this public IP address. If the reverseFqdn is specified, then a PTR DNS record is created pointing from the IP address in the in-addr.arpa domain to the reverse FQDN.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Public IP ID.
-* `ip_address` - The IP address value that was allocated.
-* `fqdn` - Fully qualified domain name of the A DNS record associated with the public IP. This is the concatenation of the domainNameLabel and the regionalized DNS zone
-
-
-## Import
-
-Public IPs can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_public_ip.myPublicIp /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/publicIPAddresses/myPublicIpAddress1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/r/redis_cache.html.markdown b/website/source/docs/providers/azurerm/r/redis_cache.html.markdown
deleted file mode 100644
index ddb904ddc..000000000
--- a/website/source/docs/providers/azurerm/r/redis_cache.html.markdown
+++ /dev/null
@@ -1,149 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_redis_cache"
-sidebar_current: "docs-azurerm-resource-redis-cache"
-description: |-
- Creates a new Redis Cache Resource
----
-
-# azurerm\_redis\_cache
-
-Creates a new Redis Cache Resource
-
-## Example Usage (Basic)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_redis_cache" "test" {
- name = "test"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- capacity = 0
- family = "C"
- sku_name = "Basic"
- enable_non_ssl_port = false
-
- redis_configuration {
- maxclients = "256"
- }
-}
-```
-
-## Example Usage (Standard)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_redis_cache" "test" {
- name = "test"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- capacity = 2
- family = "C"
- sku_name = "Standard"
- enable_non_ssl_port = false
-
- redis_configuration {
- maxclients = "1000"
- }
-}
-```
-
-## Example Usage (Premium with Clustering)
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_redis_cache" "test" {
- name = "clustered-test"
- location = "${azurerm_resource_group.test.location}"
- resource_group_name = "${azurerm_resource_group.test.name}"
- capacity = 1
- family = "P"
- sku_name = "Premium"
- enable_non_ssl_port = false
- shard_count = 3
-
- redis_configuration {
- maxclients = "7500"
- maxmemory_reserved = "2"
- maxmemory_delta = "2"
- maxmemory_policy = "allkeys-lru"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Redis instance. Changing this forces a
- new resource to be created.
-
-* `location` - (Required) The location of the resource group.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the Redis instance.
-
-* `capacity` - (Required) The size of the Redis cache to deploy. Valid values for a SKU `family` of C (Basic/Standard) are `0, 1, 2, 3, 4, 5, 6`, and for P (Premium) `family` are `1, 2, 3, 4`.
-
-* `family` - (Required) The SKU family to use. Valid values are `C` and `P`, where C = Basic/Standard, P = Premium.
-
-The pricing group for the Redis Family - either "C" or "P" at present.
-
-* `sku_name` - (Required) The SKU of Redis to use - can be either Basic, Standard or Premium.
-
-* `enable_non_ssl_port` - (Optional) Enable the non-SSL port (6379) - disabled by default.
-
-* `shard_count` - (Optional) *Only available when using the Premium SKU* The number of Shards to create on the Redis Cluster.
-
-* `redis_configuration` - (Required) Potential Redis configuration values - with some limitations by SKU - defaults/details are shown below.
-
-```hcl
-redis_configuration {
- maxclients = "512"
- maxmemory_reserve = "10"
- maxmemory_delta = "2"
- maxmemory_policy = "allkeys-lru"
-}
-```
-
-## Default Redis Configuration Values
-| Redis Value | Basic | Standard | Premium |
-| ------------------ | ------------ | ------------ | ------------ |
-| maxclients | 256 | 1000 | 7500 |
-| maxmemory_reserved | 2 | 50 | 200 |
-| maxmemory_delta | 2 | 50 | 200 |
-| maxmemory_policy | volatile-lru | volatile-lru | volatile-lru |
-
-_*Important*: The maxmemory_reserved setting is only available for Standard and Premium caches. More details are available in the Relevant Links section below._
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Redis Cache ID.
-
-* `hostname` - The Hostname of the Redis Instance
-
-* `ssl_port` - The SSL Port of the Redis Instance
-
-* `port` - The non-SSL Port of the Redis Instance
-
-* `primary_access_key` - The Primary Access Key for the Redis Instance
-
-* `secondary_access_key` - The Secondary Access Key for the Redis Instance
-
-## Relevant Links
- - [Azure Redis Cache: SKU specific configuration limitations](https://azure.microsoft.com/en-us/documentation/articles/cache-configure/#advanced-settings)
- - [Redis: Available Configuration Settings](http://redis.io/topics/config)
diff --git a/website/source/docs/providers/azurerm/r/resource_group.html.markdown b/website/source/docs/providers/azurerm/r/resource_group.html.markdown
deleted file mode 100644
index dcb8fbb0c..000000000
--- a/website/source/docs/providers/azurerm/r/resource_group.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_resource_group"
-sidebar_current: "docs-azurerm-resource-resource-group"
-description: |-
- Creates a new resource group on Azure.
----
-
-# azurerm\_resource\_group
-
-Creates a new resource group on Azure.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "testResourceGroup1"
- location = "West US"
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the resource group. Must be unique on your
- Azure subscription.
-
-* `location` - (Required) The location where the resource group should be created.
- For a list of all Azure locations, please consult [this link](http://azure.microsoft.com/en-us/regions/).
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The resource group ID.
-
-
-## Import
-
-Resource Groups can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_resource_group.mygroup /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup
-```
diff --git a/website/source/docs/providers/azurerm/r/route.html.markdown b/website/source/docs/providers/azurerm/r/route.html.markdown
deleted file mode 100644
index 2602150d7..000000000
--- a/website/source/docs/providers/azurerm/r/route.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_route"
-sidebar_current: "docs-azurerm-resource-network-route"
-description: |-
- Creates a new Route Resource
----
-
-# azurerm\_route
-
-Creates a new Route Resource
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_route_table" "test" {
- name = "acceptanceTestRouteTable1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_route" "test" {
- name = "acceptanceTestRoute1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- route_table_name = "${azurerm_route_table.test.name}"
-
- address_prefix = "10.1.0.0/16"
- next_hop_type = "vnetlocal"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the route. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the route.
-
-
-* `route_table_name` - (Required) The name of the route table to which to create the route
-
-* `address_prefix` - (Required) The destination CIDR to which the route applies, such as 10.1.0.0/16
-
-* `next_hop_type` - (Required) The type of Azure hop the packet should be sent to.
- Possible values are VirtualNetworkGateway, VnetLocal, Internet, VirtualAppliance and None
-
-* `next_hop_in_ip_address` - (Optional) Contains the IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Route ID.
-
-## Import
-
-
-Routes can be imported using the `resource id`, e.g.
-```
-terraform import azurerm_route.testRoute /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/routeTables/mytable1/routes/myroute1
-```
diff --git a/website/source/docs/providers/azurerm/r/route_table.html.markdown b/website/source/docs/providers/azurerm/r/route_table.html.markdown
deleted file mode 100644
index 5b2432662..000000000
--- a/website/source/docs/providers/azurerm/r/route_table.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_route_table"
-sidebar_current: "docs-azurerm-resource-network-route-table"
-description: |-
- Creates a new Route Table Resource
----
-
-# azurerm\_route\_table
-
-Creates a new Route Table Resource
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_route_table" "test" {
- name = "acceptanceTestSecurityGroup1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- route {
- name = "route1"
- address_prefix = "10.1.0.0/16"
- next_hop_type = "vnetlocal"
- }
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the route table. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the route table.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `route` - (Optional) Can be specified multiple times to define multiple
- routes. Each `route` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `route` block supports:
-
-* `name` - (Required) The name of the route.
-
-* `address_prefix` - (Required) The destination CIDR to which the route applies, such as 10.1.0.0/16
-
-* `next_hop_type` - (Required) The type of Azure hop the packet should be sent to.
- Possible values are VirtualNetworkGateway, VnetLocal, Internet, VirtualAppliance and None
-
-* `next_hop_in_ip_address` - (Optional) Contains the IP address packets should be forwarded to. Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Route Table ID.
-* `subnets` - The collection of Subnets associated with this route table.
-
-## Import
-
-
-Route Tables can be imported using the `resource id`, e.g.
-```
-terraform import azurerm_route_table.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/routeTables/mytable1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/azurerm/r/search_service.html.markdown b/website/source/docs/providers/azurerm/r/search_service.html.markdown
deleted file mode 100644
index a0e22c201..000000000
--- a/website/source/docs/providers/azurerm/r/search_service.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_search_service"
-sidebar_current: "docs-azurerm-resource-search-service"
-description: |-
- Manage a Search Service.
----
-
-# azurerm\_search\_service
-
-Allows you to manage an Azure Search Service
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_search_service" "test" {
- name = "acceptanceTestSearchService1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
- sku = "standard"
-
- tags {
- environment = "staging"
- database = "test"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Search Service.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the Search Service.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `sku` - (Required) Valid values are `free` and `standard`. `standard2` is also valid, but can only be used when it's enabled on the backend by Microsoft support. `free` provisions the service in shared clusters. `standard` provisions the service in dedicated clusters
-
-* `replica_count` - (Optional) Default is 1. Valid values include 1 through 12. Valid only when `sku` is `standard`.
-
-* `partition_count` - (Optional) Default is 1. Valid values include 1, 2, 3, 4, 6, or 12. Valid only when `sku` is `standard`.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Search Service ID.
diff --git a/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown
deleted file mode 100644
index 70ff65fee..000000000
--- a/website/source/docs/providers/azurerm/r/servicebus_namespace.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_servicebus_namespace"
-sidebar_current: "docs-azurerm-resource-servicebus-namespace"
-description: |-
- Create a ServiceBus Namespace.
----
-
-# azurerm\_servicebus\_namespace
-
-Create a ServiceBus Namespace.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_servicebus_namespace" "test" {
- name = "acceptanceTestServiceBusNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "basic"
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the ServiceBus Namespace resource. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `sku` - (Required) Defines which tier to use. Options are basic, standard or premium.
-
-* `capacity` - (Optional) Specifies the capacity of a premium namespace. Can be 1, 2 or 4
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ServiceBus Namespace ID.
-
-The following attributes are exported only if there is an authorization rule named
-`RootManageSharedAccessKey` which is created automatically by Azure.
-
-* `default_primary_connection_string` - The primary connection string for the authorization
- rule `RootManageSharedAccessKey`.
-
-* `default_secondary_connection_string` - The secondary connection string for the
- authorization rule `RootManageSharedAccessKey`.
-
-* `default_primary_key` - The primary access key for the authorization rule `RootManageSharedAccessKey`.
-
-* `default_secondary_key` - The secondary access key for the authorization rule `RootManageSharedAccessKey`.
-
-## Import
-
-Service Bus Namespace can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_servicebus_namespace.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.servicebus/namespaces/sbns1
-```
diff --git a/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown
deleted file mode 100644
index 92669b029..000000000
--- a/website/source/docs/providers/azurerm/r/servicebus_subscription.html.markdown
+++ /dev/null
@@ -1,115 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_servicebus_subscription"
-sidebar_current: "docs-azurerm-resource-servicebus-subscription"
-description: |-
- Create a ServiceBus Subscription.
----
-
-# azurerm\_servicebus\_subscription
-
-Create a ServiceBus Subscription.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_servicebus_namespace" "test" {
- name = "acceptanceTestServiceBusNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "standard"
-
- tags {
- environment = "Production"
- }
-}
-
-resource "azurerm_servicebus_topic" "test" {
- name = "testTopic"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- namespace_name = "${azurerm_servicebus_namespace.test.name}"
-
- enable_partitioning = true
-}
-
-resource "azurerm_servicebus_subscription" "test" {
- name = "testSubscription"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- namespace_name = "${azurerm_servicebus_namespace.test.name}"
- topic_name = "${azurerm_servicebus_topic.test.name}"
- max_delivery_count = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the ServiceBus Subscription resource.
- Changing this forces a new resource to be created.
-
-* `namespace_name` - (Required) The name of the ServiceBus Namespace to create
- this Subscription in. Changing this forces a new resource to be created.
-
-* `topic_name` - (Required) The name of the ServiceBus Topic to create
- this Subscription in. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists.
- Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace. Changing this forces a new resource to be created.
-
-* `max_delivery_count` - (Required) The maximum number of deliveries.
-
-* `auto_delete_on_idle` - (Optional) The idle interval after which the
- Subscription is automatically deleted, minimum of 5 minutes. Provided in the
- [TimeSpan](#timespan-format) format.
-
-* `default_message_ttl` - (Optional) The TTL of messages sent to this Subscription
- if no TTL value is set on the message itself. Provided in the [TimeSpan](#timespan-format)
- format.
-
-* `lock_duration` - (Optional) The lock duration for the subscription, maximum
- supported value is 5 minutes. Defaults to 1 minute.
-
-* `dead_lettering_on_filter_evaluation_exceptions` - (Optional) Boolean flag which
- controls whether the Subscription has dead letter support on Filter evaluation
- exceptions. Defaults to false.
-
-* `dead_lettering_on_message_expiration` - (Optional) Boolean flag which controls
- whether the Subscription has dead letter support when a message expires. Defaults
- to false.
-
-* `enable_batched_operations` - (Optional) Boolean flag which controls whether the
- Subscription supports batched operations. Defaults to false.
-
-* `requires_session` - (Optional) Boolean flag which controls whether this Subscription
- supports the concept of a session. Defaults to false. Changing this forces a
- new resource to be created.
-
-### TimeSpan Format
-
-Some arguments for this resource are required in the TimeSpan format which is
-used to represent a length of time. The supported format is documented [here](https://msdn.microsoft.com/en-us/library/se73z7b9(v=vs.110).aspx#Anchor_2)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ServiceBus Subscription ID.
-
-## Import
-
-Service Bus Subscriptions can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_servicebus_subscription.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.servicebus/namespaces/sbns1/topics/sntopic1/subscriptions/sbsub1
-```
diff --git a/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown b/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown
deleted file mode 100644
index 040518788..000000000
--- a/website/source/docs/providers/azurerm/r/servicebus_topic.html.markdown
+++ /dev/null
@@ -1,115 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_servicebus_topic"
-sidebar_current: "docs-azurerm-resource-servicebus-topic"
-description: |-
- Create a ServiceBus Topic.
----
-
-# azurerm\_servicebus\_topic
-
-Create a ServiceBus Topic.
-
-**Note** Topics can only be created in Namespaces with an SKU of `standard` or
-higher.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "resourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_servicebus_namespace" "test" {
- name = "acceptanceTestServiceBusNamespace"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- sku = "standard"
-
- tags {
- environment = "Production"
- }
-}
-
-resource "azurerm_servicebus_topic" "test" {
- name = "testTopic"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- namespace_name = "${azurerm_servicebus_namespace.test.name}"
-
- enable_partitioning = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the ServiceBus Topic resource. Changing this forces a
- new resource to be created.
-
-* `namespace_name` - (Required) The name of the ServiceBus Namespace to create
- this topic in. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists.
- Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the namespace. Changing this forces a new resource to be created.
-
-* `auto_delete_on_idle` - (Optional) The idle interval after which the
- Topic is automatically deleted, minimum of 5 minutes. Provided in the [TimeSpan](#timespan-format)
- format.
-
-* `default_message_ttl` - (Optional) The TTL of messages sent to this topic if no
- TTL value is set on the message itself. Provided in the [TimeSpan](#timespan-format)
- format.
-
-* `duplicate_detection_history_time_window` - (Optional) The duration during which
- duplicates can be detected. Provided in the [TimeSpan](#timespan-format) format. Defaults to 10 minutes (`00:10:00`)
-
-* `enable_batched_operations` - (Optional) Boolean flag which controls if server-side
- batched operations are enabled. Defaults to false.
-
-* `enable_express` - (Optional) Boolean flag which controls whether Express Entities
- are enabled. An express topic holds a message in memory temporarily before writing
- it to persistent storage. Defaults to false.
-
-* `enable_filtering_messages_before_publishing` - (Optional) Boolean flag which
- controls whether messages should be filtered before publishing. Defaults to
- false.
-
-* `enable_partitioning` - (Optional) Boolean flag which controls whether to enable
- the topic to be partitioned across multiple message brokers. Defaults to false.
- Changing this forces a new resource to be created.
-
-* `max_size_in_megabytes` - (Optional) Integer value which controls the size of
- memory allocated for the topic. For supported values see the "Queue/topic size"
- section of [this document](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-quotas).
-
-* `requires_duplicate_detection` - (Optional) Boolean flag which controls whether
- the Topic requires duplicate detection. Defaults to false. Changing this forces
- a new resource to be created.
-
-* `support_ordering` - (Optional) Boolean flag which controls whether the Topic
- supports ordering. Defaults to false.
-
-### TimeSpan Format
-
-Some arguments for this resource are required in the TimeSpan format which is
-used to represent a length of time. The supported format is documented [here](https://msdn.microsoft.com/en-us/library/se73z7b9(v=vs.110).aspx#Anchor_2)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ServiceBus Topic ID.
-
-## Import
-
-Service Bus Topics can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_servicebus_topic.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.servicebus/namespaces/sbns1/topics/sntopic1
-```
diff --git a/website/source/docs/providers/azurerm/r/sql_database.html.markdown b/website/source/docs/providers/azurerm/r/sql_database.html.markdown
deleted file mode 100644
index 65df58053..000000000
--- a/website/source/docs/providers/azurerm/r/sql_database.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_sql_database"
-sidebar_current: "docs-azurerm-resource-sql-database"
-description: |-
- Create a SQL Database.
----
-
-# azurerm\_sql\_database
-
-Allows you to manage an Azure SQL Database
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_sql_database" "test" {
- name = "MySQLDatabase"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
-
- tags {
- environment = "production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the database.
-
-* `resource_group_name` - (Required) The name of the resource group in which to create the database. This must be the same as Database Server resource group currently.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `server_name` - (Required) The name of the SQL Server on which to create the database.
-
-* `create_mode` - (Optional) Specifies the type of database to create. Defaults to `Default`. See below for the accepted values.
-
-* `source_database_id` - (Optional) The URI of the source database if `create_mode` value is not `Default`.
-
-* `restore_point_in_time` - (Optional) The point in time for the restore. Only applies if `create_mode` is `PointInTimeRestore` e.g. 2013-11-08T22:00:40Z
-
-* `edition` - (Optional) The edition of the database to be created. Applies only if `create_mode` is `Default`. Valid values are: `Basic`, `Standard`, `Premium`, or `DataWarehouse`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
-
-* `collation` - (Optional) The name of the collation. Applies only if `create_mode` is `Default`. Azure default is `SQL_LATIN1_GENERAL_CP1_CI_AS`
-
-* `max_size_bytes` - (Optional) The maximum size that the database can grow to. Applies only if `create_mode` is `Default`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
-
-* `requested_service_objective_id` - (Optional) Use `requested_service_objective_id` or `requested_service_objective_name` to set the performance level for the database.
- Valid values are: `S0`, `S1`, `S2`, `S3`, `P1`, `P2`, `P4`, `P6`, `P11` and `ElasticPool`. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
-
-* `requested_service_objective_name` - (Optional) Use `requested_service_objective_name` or `requested_service_objective_id` to set the performance level for the database. Please see [Azure SQL Database Service Tiers](https://azure.microsoft.com/en-gb/documentation/articles/sql-database-service-tiers/).
-
-* `source_database_deletion_date` - (Optional) The deletion date time of the source database. Only applies to deleted databases where `create_mode` is `PointInTimeRestore`.
-
-* `elastic_pool_name` - (Optional) The name of the elastic database pool.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The SQL Database ID.
-* `creation_date` - The creation date of the SQL Database.
-* `default_secondary_location` - The default secondary location of the SQL Database.
diff --git a/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown b/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown
deleted file mode 100644
index 7d8a07541..000000000
--- a/website/source/docs/providers/azurerm/r/sql_elasticpool.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_sql_elasticpool"
-sidebar_current: "docs-azurerm-resource-sql-elasticpool"
-description: |-
- Create a SQL Elastic Pool.
----
-
-# azurerm\_sql\_elasticpool
-
-Allows you to manage an Azure SQL Elastic Pool.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "test"
- location = "West US"
-}
-
-resource "azurerm_sql_server" "test" {
- name = "test"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
- version = "12.0"
- administrator_login = "4dm1n157r470r"
- administrator_login_password = "4-v3ry-53cr37-p455w0rd"
-}
-
-resource "azurerm_sql_elasticpool" "test" {
- name = "test"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
- server_name = "${azurerm_sql_server.test.name}"
- edition = "Basic"
- dtu = 100
- db_dtu_min = 0
- db_dtu_max = 5
- pool_size = 5000
-}
-```
-
-~> **NOTE on `azurerm_sql_elasticpool`:** - The values of `edition`, `dtu`, and `pool_size` must be consistent with the [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus). Any inconsistent argument configuration will be rejected.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the elastic pool.
-
-* `resource_group_name` - (Required) The name of the resource group in which to create the elastic pool. This must be the same as the resource group of the underlying SQL server.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `server_name` - (Required) The name of the SQL Server on which to create the elastic pool. Changing this forces a new resource to be created.
-
-* `edition` - (Required) The edition of the elastic pool to be created. Valid values are `Basic`, `Standard`, and `Premium`. Refer to [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus) for details. Changing this forces a new resource to be created.
-
-* `dtu` - (Required) The total shared DTU for the elastic pool. Valid values depend on the `edition` which has been defined. Refer to [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus) for valid combinations.
-
-* `db_dtu_min` - (Optional) The minimum DTU which will be guaranteed to all databases in the elastic pool to be created.
-
-* `db_dtu_max` - (Optional) The maximum DTU which will be guaranteed to all databases in the elastic pool to be created.
-
-* `pool_size` - (Optional) The maximum size in MB that all databases in the elastic pool can grow to. The maximum size must be consistent with combination of `edition` and `dtu` and the limits documented in [Azure SQL Database Service Tiers](https://docs.microsoft.com/en-gb/azure/sql-database/sql-database-service-tiers#elastic-pool-service-tiers-and-performance-in-edtus). If not defined when creating an elastic pool, the value is set to the size implied by `edition` and `dtu`.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The SQL Elastic Pool ID.
-
-* `creation_date` - The creation date of the SQL Elastic Pool.
diff --git a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown b/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown
deleted file mode 100644
index 5d31b1062..000000000
--- a/website/source/docs/providers/azurerm/r/sql_firewall_rule.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_sql_firewall_rule"
-sidebar_current: "docs-azurerm-resource-sql-firewall_rule"
-description: |-
- Create a SQL Firewall Rule.
----
-
-# azurerm\_sql\_firewall\_rule
-
-Allows you to manage an Azure SQL Firewall Rule
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_sql_database" "test" {
- name = "MySQLDatabase"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
-
- tags {
- environment = "production"
- }
-}
-
-resource "azurerm_sql_firewall_rule" "test" {
- name = "FirewallRule1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- server_name = "${azurerm_sql_server.test.name}"
- start_ip_address = "10.0.17.62"
- end_ip_address = "10.0.17.62"
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the firewall rule.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the firewall rule.
-
-* `server_name` - (Required) The name of the SQL Server on which to create the Firewall Rule.
-
-* `start_ip_address` - (Required) The starting IP address to allow through the firewall for this rule.
-
-* `end_ip_address` - (Required) The ending IP address to allow through the firewall for this rule.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The SQL Firewall Rule ID.
-
-## Import
-
-SQL Firewall Rules can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_sql_firewall_rule.rule1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver/firewallRules/rule1
-```
-
diff --git a/website/source/docs/providers/azurerm/r/sql_server.html.markdown b/website/source/docs/providers/azurerm/r/sql_server.html.markdown
deleted file mode 100644
index 8af90f9dc..000000000
--- a/website/source/docs/providers/azurerm/r/sql_server.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_sql_server"
-sidebar_current: "docs-azurerm-resource-sql-server"
-description: |-
- Create a SQL Server.
----
-
-# azurerm\_sql\_server
-
-Allows you to manage an Azure SQL Database Server
-
-~> **Note:** All arguments including the administrator login and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_sql_server" "test" {
- name = "mysqlserver"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "West US"
- version = "12.0"
- administrator_login = "mradministrator"
- administrator_login_password = "thisIsDog11"
-
- tags {
- environment = "production"
- }
-}
-```
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the SQL Server.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the sql server.
-
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-
-* `version` - (Required) The version for the new server. Valid values are: 2.0 (for v11 server) and 12.0 (for v12 server).
-
-* `administrator_login` - (Required) The administrator login name for the new server.
-
-* `administrator_login_password` - (Required) The password for the new AdministratorLogin. Please follow Azure's [Password Policy](https://msdn.microsoft.com/library/ms161959.aspx)
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The SQL Server ID.
-* `fully_qualified_domain_name` - The fully qualified domain name of the Azure SQL Server (e.g. myServerName.database.windows.net)
-
-## Import
-
-SQL Servers can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_sql_server.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Sql/servers/myserver
-```
diff --git a/website/source/docs/providers/azurerm/r/storage_account.html.markdown b/website/source/docs/providers/azurerm/r/storage_account.html.markdown
deleted file mode 100644
index 9b4e13064..000000000
--- a/website/source/docs/providers/azurerm/r/storage_account.html.markdown
+++ /dev/null
@@ -1,95 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_account"
-sidebar_current: "docs-azurerm-resource-storage-account"
-description: |-
- Create an Azure Storage Account.
----
-
-# azurerm\_storage\_account
-
-Create an Azure Storage Account.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "testrg" {
- name = "resourceGroupName"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "testsa" {
- name = "storageaccountname"
- resource_group_name = "${azurerm_resource_group.testrg.name}"
-
- location = "westus"
- account_type = "Standard_GRS"
-
- tags {
- environment = "staging"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the storage account. Changing this forces a
- new resource to be created. This must be unique across the entire Azure service,
- not just within the resource group.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the storage account. Changing this forces a new resource to be created.
-
-* `location` - (Required) Specifies the supported Azure location where the
- resource exists. Changing this forces a new resource to be created.
-
-* `account_kind` - (Optional) Defines the Kind of account. Valid options are `Storage`
- and `BlobStorage`. Changing this forces a new resource to be created. Defaults
- to `Storage`.
-
-* `account_type` - (Required) Defines the type of storage account to be
- created. Valid options are `Standard_LRS`, `Standard_ZRS`, `Standard_GRS`,
- `Standard_RAGRS`, `Premium_LRS`. Changing this is sometimes valid - see the Azure
- documentation for more information on which types of accounts can be converted
- into other types.
-
-* `access_tier` - (Required for `BlobStorage` accounts) Defines the access tier
- for `BlobStorage` accounts. Valid options are `Hot` and `Cool`, defaults to
- `Hot`.
-
-* `enable_blob_encryption` - (Optional) Boolean flag which controls if Encryption
- Services are enabled for Blob storage, see [here](https://azure.microsoft.com/en-us/documentation/articles/storage-service-encryption/)
- for more information.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-Note that although the Azure API supports setting custom domain names for
-storage accounts, this is not currently supported.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage account Resource ID.
-* `primary_location` - The primary location of the storage account.
-* `secondary_location` - The secondary location of the storage account.
-* `primary_blob_endpoint` - The endpoint URL for blob storage in the primary location.
-* `secondary_blob_endpoint` - The endpoint URL for blob storage in the secondary location.
-* `primary_queue_endpoint` - The endpoint URL for queue storage in the primary location.
-* `secondary_queue_endpoint` - The endpoint URL for queue storage in the secondary location.
-* `primary_table_endpoint` - The endpoint URL for table storage in the primary location.
-* `secondary_table_endpoint` - The endpoint URL for table storage in the secondary location.
-* `primary_file_endpoint` - The endpoint URL for file storage in the primary location.
-* `primary_access_key` - The primary access key for the storage account
-* `secondary_access_key` - The secondary access key for the storage account
-
-## Import
-
-Storage Accounts can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_storage_account.storageAcc1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/myresourcegroup/providers/Microsoft.Storage/storageAccounts/myaccount
-```
-
diff --git a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown b/website/source/docs/providers/azurerm/r/storage_blob.html.markdown
deleted file mode 100644
index 77e13ea92..000000000
--- a/website/source/docs/providers/azurerm/r/storage_blob.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_blob"
-sidebar_current: "docs-azurerm-resource-storage-blob"
-description: |-
- Create an Azure Storage Blob.
----
-
-# azurerm\_storage\_blob
-
-Create an Azure Storage Blob.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg-%d"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "acctestacc%s"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-}
-
-resource "azurerm_storage_container" "test" {
- name = "vhds"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- container_access_type = "private"
-}
-
-resource "azurerm_storage_blob" "testsb" {
- name = "sample.vhd"
-
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- storage_container_name = "${azurerm_storage_container.test.name}"
-
- type = "page"
- size = 5120
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage blob. Must be unique within the storage container in which the blob is located.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the storage container. Changing this forces a new resource to be created.
-
-* `storage_account_name` - (Required) Specifies the storage account in which to create the storage container.
- Changing this forces a new resource to be created.
-
-* `storage_container_name` - (Required) The name of the storage container in which this blob should be created.
-
-* `type` - (Optional) The type of the storage blob to be created. One of either `block` or `page`. When not copying from an existing blob,
- this becomes required.
-
-* `size` - (Optional) Used only for `page` blobs to specify the size in bytes of the blob to be created. Must be a multiple of 512. Defaults to 0.
-
-* `source` - (Optional) An absolute path to a file on the local system. Cannot be defined if `source_uri` is defined.
-
-* `source_uri` - (Optional) The URI of an existing blob, or a file in the Azure File service, to use as the source contents
- for the blob to be created. Changing this forces a new resource to be created. Cannot be defined if `source` is defined.
-
-* `parallelism` - (Optional) The number of workers per CPU core to run for concurrent uploads. Defaults to `8`.
-
-* `attempts` - (Optional) The number of attempts to make per page or block when uploading. Defaults to `1`.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage blob Resource ID.
-* `url` - The URL of the blob
diff --git a/website/source/docs/providers/azurerm/r/storage_container.html.markdown b/website/source/docs/providers/azurerm/r/storage_container.html.markdown
deleted file mode 100644
index f355bb8ed..000000000
--- a/website/source/docs/providers/azurerm/r/storage_container.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_container"
-sidebar_current: "docs-azurerm-resource-storage-container"
-description: |-
- Create an Azure Storage Container.
----
-
-# azurerm\_storage\_container
-
-Create an Azure Storage Container.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "accteststorageaccount"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_storage_container" "test" {
- name = "vhds"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- container_access_type = "private"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage container. Must be unique within the storage service in which the container is located.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the storage container. Changing this forces a new resource to be created.
-
-* `storage_account_name` - (Required) Specifies the storage account in which to create the storage container.
- Changing this forces a new resource to be created.
-
-* `container_access_type` - (Required) The level of access that the container provides. Can be either `blob`, `container` or `private`.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage container Resource ID.
-* `properties` - Key-value definition of additional properties associated to the storage container
diff --git a/website/source/docs/providers/azurerm/r/storage_queue.html.markdown b/website/source/docs/providers/azurerm/r/storage_queue.html.markdown
deleted file mode 100644
index 6d0e1f573..000000000
--- a/website/source/docs/providers/azurerm/r/storage_queue.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_queue"
-sidebar_current: "docs-azurerm-resource-storage-queue"
-description: |-
- Create an Azure Storage Queue.
----
-
-# azurerm\_storage\_queue
-
-Create an Azure Storage Queue.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg-%d"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "acctestacc%s"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-}
-
-resource "azurerm_storage_queue" "test" {
- name = "mysamplequeue"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage queue. Must be unique within the storage account in which the queue is located.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the storage queue. Changing this forces a new resource to be created.
-
-* `storage_account_name` - (Required) Specifies the storage account in which to create the storage queue.
- Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage queue Resource ID.
diff --git a/website/source/docs/providers/azurerm/r/storage_share.html.markdown b/website/source/docs/providers/azurerm/r/storage_share.html.markdown
deleted file mode 100644
index 0e36586c4..000000000
--- a/website/source/docs/providers/azurerm/r/storage_share.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_share"
-sidebar_current: "docs-azurerm-resource-storage-share"
-description: |-
- Create an Azure Storage Share.
----
-
-# azurerm\_storage\_share
-
-Create an Azure Storage File Share.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg-%d"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "acctestacc%s"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-}
-
-resource "azurerm_storage_share" "testshare" {
- name = "sharename"
-
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
-
- quota = 50
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the share. Must be unique within the storage account where the share is located.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the share. Changing this forces a new resource to be created.
-
-* `storage_account_name` - (Required) Specifies the storage account in which to create the share.
- Changing this forces a new resource to be created.
-
-* `quota` - (Optional) The maximum size of the share, in gigabytes. Must be greater than 0, and less than or equal to 5 TB (5120 GB). Default this is set to 0 which results in setting the quota to 5 TB.
-
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage share Resource ID.
-* `url` - The URL of the share
diff --git a/website/source/docs/providers/azurerm/r/storage_table.html.markdown b/website/source/docs/providers/azurerm/r/storage_table.html.markdown
deleted file mode 100644
index ff597cc64..000000000
--- a/website/source/docs/providers/azurerm/r/storage_table.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_storage_table"
-sidebar_current: "docs-azurerm-resource-storage-table"
-description: |-
- Create an Azure Storage Table.
----
-
-# azurerm\_storage\_table
-
-Create an Azure Storage Table.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg-%d"
- location = "westus"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "acctestacc%s"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-}
-
-resource "azurerm_storage_table" "test" {
- name = "mysampletable"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the storage table. Must be unique within the storage account in which the table is located.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the storage table. Changing this forces a new resource to be created.
-
-* `storage_account_name` - (Required) Specifies the storage account in which to create the storage table.
- Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the arguments listed above:
-
-* `id` - The storage table Resource ID.
diff --git a/website/source/docs/providers/azurerm/r/subnet.html.markdown b/website/source/docs/providers/azurerm/r/subnet.html.markdown
deleted file mode 100644
index 34f4d97ea..000000000
--- a/website/source/docs/providers/azurerm/r/subnet.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azure_subnet"
-sidebar_current: "docs-azurerm-resource-network-subnet"
-description: |-
- Creates a new subnet. Subnets represent network segments within the IP space defined by the virtual network.
----
-
-# azurerm\_subnet
-
-Creates a new subnet. Subnets represent network segments within the IP space defined by the virtual network.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acceptanceTestResourceGroup1"
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acceptanceTestVirtualNetwork1"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "testsubnet"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.1.0/24"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the subnet. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the subnet.
-
-* `virtual_network_name` - (Required) The name of the virtual network to which to attach the subnet.
-
-* `address_prefix` - (Required) The address prefix to use for the subnet.
-
-* `network_security_group_id` - (Optional) The ID of the Network Security Group to associate with
- the subnet.
-
-* `route_table_id` - (Optional) The ID of the Route Table to associate with
- the subnet.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The subnet ID.
-* `ip_configurations` - The collection of IP Configurations with IPs within this subnet.
-* `name` - The name of the subnet.
-* `resource_group_name` - The name of the resource group in which the subnet is created in.
-* `virtual_network_name` - The name of the virtual network in which the subnet is created in
-* `address_prefix` - The address prefix for the subnet
-
-## Import
-
-Subnets can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_subnet.testSubnet /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/subnets/mysubnet1
-```
diff --git a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown b/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
deleted file mode 100644
index ffb638695..000000000
--- a/website/source/docs/providers/azurerm/r/template_deployment.html.markdown
+++ /dev/null
@@ -1,115 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azurerm_template_deployment"
-sidebar_current: "docs-azurerm-resource-template-deployment"
-description: |-
- Create a template deployment of resources.
----
-
-# azurerm\_template\_deployment
-
-Create a template deployment of resources
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg-01"
- location = "West US"
-}
-
-resource "azurerm_template_deployment" "test" {
- name = "acctesttemplate-01"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- template_body = < **NOTE:** `admin_password` must be between 6-72 characters long and must satisfy at least 3 of password complexity requirements from the following:
-1. Contains an uppercase character
-2. Contains a lowercase character
-3. Contains a numeric digit
-4. Contains a special character
-
-`os_profile_windows_config` supports the following:
-
-* `provision_vm_agent` - (Optional)
-* `enable_automatic_upgrades` - (Optional)
-* `winrm` - (Optional) A collection of WinRM configuration blocks as documented below.
-* `additional_unattend_config` - (Optional) An Additional Unattended Config block as documented below.
-
-`winrm` supports the following:
-
-* `protocol` - (Required) Specifies the protocol of listener
-* `certificate_url` - (Optional) Specifies URL of the certificate with which new Virtual Machines is provisioned.
-
-`additional_unattend_config` supports the following:
-
-* `pass` - (Required) Specifies the name of the pass that the content applies to. The only allowable value is `oobeSystem`.
-* `component` - (Required) Specifies the name of the component to configure with the added content. The only allowable value is `Microsoft-Windows-Shell-Setup`.
-* `setting_name` - (Required) Specifies the name of the setting to which the content applies. Possible values are: `FirstLogonCommands` and `AutoLogon`.
-* `content` - (Optional) Specifies the base-64 encoded XML formatted content that is added to the unattend.xml file for the specified path and component.
-
-`os_profile_linux_config` supports the following:
-
-* `disable_password_authentication` - (Required) Specifies whether password authentication should be disabled.
-* `ssh_keys` - (Optional) Specifies a collection of `path` and `key_data` to be placed on the virtual machine.
-
-~> **Note:** Please note that the only allowed `path` is `/home//.ssh/authorized_keys` due to a limitation of Azure.
-
-`os_profile_secrets` supports the following:
-
-* `source_vault_id` - (Required) Specifies the key vault to use.
-* `vault_certificates` - (Required) A collection of Vault Certificates as documented below
-
-`vault_certificates` support the following:
-
-* `certificate_url` - (Required) Specifies the URI of the key vault secrets in the format of `https:///secrets//`. Stored secret is the Base64 encoding of a JSON Object that which is encoded in UTF-8 of which the contents need to be
-
-```json
-{
- "data":"",
- "dataType":"pfx",
- "password":""
-}
-```
-
-* `certificate_store` - (Required, on windows machines) Specifies the certificate store on the Virtual Machine where the certificate should be added to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual machine ID.
-
-## Import
-
-Virtual Machines can be imported using the `resource id`, e.g.
-
-```hcl
-terraform import azurerm_virtual_machine.test /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/microsoft.compute/virtualMachines/machine1
-```
diff --git a/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown b/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown
deleted file mode 100644
index adbb87142..000000000
--- a/website/source/docs/providers/azurerm/r/virtual_machine_extension.html.markdown
+++ /dev/null
@@ -1,171 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azure_virtual_machine_extension"
-sidebar_current: "docs-azurerm-resource-virtualmachine-extension"
-description: |-
- Creates a new Virtual Machine Extension to provide post deployment
- configuration and run automated tasks.
----
-
-# azurerm\_virtual\_machine\_extension
-
-Creates a new Virtual Machine Extension to provide post deployment configuration
-and run automated tasks.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acctvn"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "acctsub"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_network_interface" "test" {
- name = "acctni"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- ip_configuration {
- name = "testconfiguration1"
- subnet_id = "${azurerm_subnet.test.id}"
- private_ip_address_allocation = "dynamic"
- }
-}
-
-resource "azurerm_storage_account" "test" {
- name = "accsa"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_storage_container" "test" {
- name = "vhds"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- container_access_type = "private"
-}
-
-resource "azurerm_virtual_machine" "test" {
- name = "acctvm"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- network_interface_ids = ["${azurerm_network_interface.test.id}"]
- vm_size = "Standard_A0"
-
- storage_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "14.04.2-LTS"
- version = "latest"
- }
-
- storage_os_disk {
- name = "myosdisk1"
- vhd_uri = "${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}/myosdisk1.vhd"
- caching = "ReadWrite"
- create_option = "FromImage"
- }
-
- os_profile {
- computer_name = "hostname"
- admin_username = "testadmin"
- admin_password = "Password1234!"
- }
-
- os_profile_linux_config {
- disable_password_authentication = false
- }
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_virtual_machine_extension" "test" {
- name = "hostname"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_machine_name = "${azurerm_virtual_machine.test.name}"
- publisher = "Microsoft.OSTCExtensions"
- type = "CustomScriptForLinux"
- type_handler_version = "1.2"
-
- settings = < **Note:** All arguments including the administrator login and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acctvn"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "acctsub"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_storage_account" "test" {
- name = "accsa"
- resource_group_name = "${azurerm_resource_group.test.name}"
- location = "westus"
- account_type = "Standard_LRS"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_storage_container" "test" {
- name = "vhds"
- resource_group_name = "${azurerm_resource_group.test.name}"
- storage_account_name = "${azurerm_storage_account.test.name}"
- container_access_type = "private"
-}
-
-resource "azurerm_virtual_machine_scale_set" "test" {
- name = "mytestscaleset-1"
- location = "West US"
- resource_group_name = "${azurerm_resource_group.test.name}"
- upgrade_policy_mode = "Manual"
-
- sku {
- name = "Standard_A0"
- tier = "Standard"
- capacity = 2
- }
-
- os_profile {
- computer_name_prefix = "testvm"
- admin_username = "myadmin"
- admin_password = "Passwword1234"
- }
-
- os_profile_linux_config {
- disable_password_authentication = true
-
- ssh_keys {
- path = "/home/myadmin/.ssh/authorized_keys"
- key_data = "${file("~/.ssh/demo_key.pub")}"
- }
- }
-
- network_profile {
- name = "TestNetworkProfile"
- primary = true
-
- ip_configuration {
- name = "TestIPConfiguration"
- subnet_id = "${azurerm_subnet.test.id}"
- }
- }
-
- storage_profile_os_disk {
- name = "osDiskProfile"
- caching = "ReadWrite"
- create_option = "FromImage"
- vhd_containers = ["${azurerm_storage_account.test.primary_blob_endpoint}${azurerm_storage_container.test.name}"]
- }
-
- storage_profile_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "14.04.2-LTS"
- version = "latest"
- }
-}
-```
-
-## Example Usage with Managed Disks
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "acctestrg"
- location = "West US 2"
-}
-
-resource "azurerm_virtual_network" "test" {
- name = "acctvn"
- address_space = ["10.0.0.0/16"]
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
-}
-
-resource "azurerm_subnet" "test" {
- name = "acctsub"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test.name}"
- address_prefix = "10.0.2.0/24"
-}
-
-resource "azurerm_public_ip" "test" {
- name = "test"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- public_ip_address_allocation = "static"
- domain_name_label = "${azurerm_resource_group.test.name}"
-
- tags {
- environment = "staging"
- }
-}
-
-resource "azurerm_lb" "test" {
- name = "test"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
-
- frontend_ip_configuration {
- name = "PublicIPAddress"
- public_ip_address_id = "${azurerm_public_ip.test.id}"
- }
-}
-
-resource "azurerm_lb_backend_address_pool" "bpepool" {
- resource_group_name = "${azurerm_resource_group.test.name}"
- loadbalancer_id = "${azurerm_lb.test.id}"
- name = "BackEndAddressPool"
-}
-
-resource "azurerm_lb_nat_pool" "lbnatpool" {
- count = 3
- resource_group_name = "${azurerm_resource_group.test.name}"
- name = "ssh"
- loadbalancer_id = "${azurerm_lb.test.id}"
- protocol = "Tcp"
- frontend_port_start = 50000
- frontend_port_end = 50119
- backend_port = 22
- frontend_ip_configuration_name = "PublicIPAddress"
-}
-
-resource "azurerm_virtual_machine_scale_set" "test" {
- name = "mytestscaleset-1"
- location = "West US 2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- upgrade_policy_mode = "Manual"
-
- sku {
- name = "Standard_A0"
- tier = "Standard"
- capacity = 2
- }
-
- storage_profile_image_reference {
- publisher = "Canonical"
- offer = "UbuntuServer"
- sku = "14.04.2-LTS"
- version = "latest"
- }
-
- storage_profile_os_disk {
- name = ""
- caching = "ReadWrite"
- create_option = "FromImage"
- managed_disk_type = "Standard_LRS"
- }
-
- storage_profile_data_disk {
- lun = 0
- caching = "ReadWrite"
- create_option = "Empty"
- disk_size_gb = 10
- }
-
- os_profile {
- computer_name_prefix = "testvm"
- admin_username = "myadmin"
- admin_password = "Passwword1234"
- }
-
- os_profile_linux_config {
- disable_password_authentication = true
-
- ssh_keys {
- path = "/home/myadmin/.ssh/authorized_keys"
- key_data = "${file("~/.ssh/demo_key.pub")}"
- }
- }
-
- network_profile {
- name = "terraformnetworkprofile"
- primary = true
-
- ip_configuration {
- name = "TestIPConfiguration"
- subnet_id = "${azurerm_subnet.test.id}"
- load_balancer_backend_address_pool_ids = ["${azurerm_lb_backend_address_pool.bpepool.id}"]
- load_balancer_inbound_nat_rules_ids = ["${element(azurerm_lb_nat_pool.lbnatpool.*.id, count.index)}"]
- }
- }
-
- tags {
- environment = "staging"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Specifies the name of the virtual machine scale set resource. Changing this forces a new resource to be created.
-* `resource_group_name` - (Required) The name of the resource group in which to create the virtual machine scale set. Changing this forces a new resource to be created.
-* `location` - (Required) Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
-* `sku` - (Required) A sku block as documented below.
-* `upgrade_policy_mode` - (Required) Specifies the mode of an upgrade to virtual machines in the scale set. Possible values, `Manual` or `Automatic`.
-* `overprovision` - (Optional) Specifies whether the virtual machine scale set should be overprovisioned.
-* `single_placement_group` - (Optional) Specifies whether the scale set is limited to a single placement group with a maximum size of 100 virtual machines. If set to false, managed disks must be used. Default is true. Changing this forces a
- new resource to be created. See [documentation](http://docs.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-placement-groups) for more information.
-* `os_profile` - (Required) A Virtual Machine OS Profile block as documented below.
-* `os_profile_secrets` - (Optional) A collection of Secret blocks as documented below.
-* `os_profile_windows_config` - (Required, when a windows machine) A Windows config block as documented below.
-* `os_profile_linux_config` - (Required, when a linux machine) A Linux config block as documented below.
-* `network_profile` - (Required) A collection of network profile block as documented below.
-* `storage_profile_os_disk` - (Required) A storage profile os disk block as documented below
-* `storage_profile_data_disk` - (Optional) A storage profile data disk block as documented below
-* `storage_profile_image_reference` - (Optional) A storage profile image reference block as documented below.
-* `extension` - (Optional) Can be specified multiple times to add extension profiles to the scale set. Each `extension` block supports the fields documented below.
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-
-`sku` supports the following:
-
-* `name` - (Required) Specifies the size of virtual machines in a scale set.
-* `tier` - (Optional) Specifies the tier of virtual machines in a scale set. Possible values, `standard` or `basic`.
-* `capacity` - (Required) Specifies the number of virtual machines in the scale set.
-
-`os_profile` supports the following:
-
-* `computer_name_prefix` - (Required) Specifies the computer name prefix for all of the virtual machines in the scale set. Computer name prefixes must be 1 to 15 characters long.
-* `admin_username` - (Required) Specifies the administrator account name to use for all the instances of virtual machines in the scale set.
-* `admin_password` - (Required) Specifies the administrator password to use for all the instances of virtual machines in a scale set..
-* `custom_data` - (Optional) Specifies custom data to supply to the machine. On linux-based systems, this can be used as a cloud-init script. On other systems, this will be copied as a file on disk. Internally, Terraform will base64 encode this value before sending it to the API. The maximum length of the binary array is 65535 bytes. Changing this forces a new resource to be created.
-
-`os_profile_secrets` supports the following:
-
-* `source_vault_id` - (Required) Specifies the key vault to use.
-* `vault_certificates` - (Required, on windows machines) A collection of Vault Certificates as documented below
-
-`vault_certificates` support the following:
-
-* `certificate_url` - (Required) It is the Base64 encoding of a JSON Object that which is encoded in UTF-8 of which the contents need to be `data`, `dataType` and `password`.
-* `certificate_store` - (Required, on windows machines) Specifies the certificate store on the Virtual Machine where the certificate should be added to.
-
-
-`os_profile_windows_config` supports the following:
-
-* `provision_vm_agent` - (Optional) Indicates whether virtual machine agent should be provisioned on the virtual machines in the scale set.
-* `enable_automatic_upgrades` - (Optional) Indicates whether virtual machines in the scale set are enabled for automatic updates.
-* `winrm` - (Optional) A collection of WinRM configuration blocks as documented below.
-* `additional_unattend_config` - (Optional) An Additional Unattended Config block as documented below.
-
-`winrm` supports the following:
-
-* `protocol` - (Required) Specifies the protocol of listener
-* `certificate_url` - (Optional) Specifies URL of the certificate with which new Virtual Machines is provisioned.
-
-`additional_unattend_config` supports the following:
-
-* `pass` - (Required) Specifies the name of the pass that the content applies to. The only allowable value is `oobeSystem`.
-* `component` - (Required) Specifies the name of the component to configure with the added content. The only allowable value is `Microsoft-Windows-Shell-Setup`.
-* `setting_name` - (Required) Specifies the name of the setting to which the content applies. Possible values are: `FirstLogonCommands` and `AutoLogon`.
-* `content` - (Optional) Specifies the base-64 encoded XML formatted content that is added to the unattend.xml file for the specified path and component.
-
-`os_profile_linux_config` supports the following:
-
-* `disable_password_authentication` - (Required) Specifies whether password authentication should be disabled. Changing this forces a new resource to be created.
-* `ssh_keys` - (Optional) Specifies a collection of `path` and `key_data` to be placed on the virtual machine.
-
-~> _**Note:** Please note that the only allowed `path` is `/home//.ssh/authorized_keys` due to a limitation of Azure_
-
-
-`network_profile` supports the following:
-
-* `name` - (Required) Specifies the name of the network interface configuration.
-* `primary` - (Required) Indicates whether network interfaces created from the network interface configuration will be the primary NIC of the VM.
-* `ip_configuration` - (Required) An ip_configuration block as documented below
-
-`ip_configuration` supports the following:
-
-* `name` - (Required) Specifies name of the IP configuration.
-* `subnet_id` - (Required) Specifies the identifier of the subnet.
-* `load_balancer_backend_address_pool_ids` - (Optional) Specifies an array of references to backend address pools of load balancers. A scale set can reference backend address pools of one public and one internal load balancer. Multiple scale sets cannot use the same load balancer.
-* `load_balancer_inbound_nat_rules_ids` - (Optional) Specifies an array of references to inbound NAT rules for load balancers.
-
-`storage_profile_os_disk` supports the following:
-
-* `name` - (Required) Specifies the disk name. Value must be blank (`""`) when `managed_disk_type` is specified.
-* `vhd_containers` - (Optional) Specifies the vhd uri. Cannot be used when `image` or `managed_disk_type` is specified.
-* `managed_disk_type` - (Optional) Specifies the type of managed disk to create. Value you must be either `Standard_LRS` or `Premium_LRS`. Cannot be used when `vhd_containers` or `image` is specified.
-* `create_option` - (Required) Specifies how the virtual machine should be created. The only possible option is `FromImage`.
-* `caching` - (Optional) Specifies the caching requirements. Possible values include: `None` (default), `ReadOnly`, `ReadWrite`.
-* `image` - (Optional) Specifies the blob uri for user image. A virtual machine scale set creates an os disk in the same container as the user image.
- Updating the osDisk image causes the existing disk to be deleted and a new one created with the new image. If the VM scale set is in Manual upgrade mode then the virtual machines are not updated until they have manualUpgrade applied to them.
- When setting this field `os_type` needs to be specified. Cannot be used when `vhd_containers`, `managed_disk_type` or `storage_profile_image_reference ` are specified.
-* `os_type` - (Optional) Specifies the operating system Type, valid values are windows, linux.
-
-`storage_profile_data_disk` supports the following:
-
-* `lun` - (Required) Specifies the Logical Unit Number of the disk in each virtual machine in the scale set.
-* `create_option` - (Optional) Specifies how the data disk should be created. The only possible options are `FromImage` and `Empty`.
-* `caching` - (Optional) Specifies the caching requirements. Possible values include: `None` (default), `ReadOnly`, `ReadWrite`.
-* `disk_size_gb` - (Optional) Specifies the size of the disk in GB. This element is required when creating an empty disk.
-* `managed_disk_type` - (Optional) Specifies the type of managed disk to create. Value must be either `Standard_LRS` or `Premium_LRS`.
-
-`storage_profile_image_reference` supports the following:
-
-* `publisher` - (Required) Specifies the publisher of the image used to create the virtual machines
-* `offer` - (Required) Specifies the offer of the image used to create the virtual machines.
-* `sku` - (Required) Specifies the SKU of the image used to create the virtual machines.
-* `version` - (Optional) Specifies the version of the image used to create the virtual machines.
-
-`extension` supports the following:
-
-* `name` - (Required) Specifies the name of the extension.
-* `publisher` - (Required) The publisher of the extension, available publishers can be found by using the Azure CLI.
-* `type` - (Required) The type of extension, available types for a publisher can be found using the Azure CLI.
-* `type_handler_version` - (Required) Specifies the version of the extension to use, available versions can be found using the Azure CLI.
-* `auto_upgrade_minor_version` - (Optional) Specifies whether or not to use the latest minor version available.
-* `settings` - (Required) The settings passed to the extension, these are specified as a JSON object in a string.
-* `protected_settings` - (Optional) The protected_settings passed to the extension, like settings, these are specified as a JSON object in a string.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual machine scale set ID.
-
-
-## Import
-
-Virtual Machine Scale Sets can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_virtual_machine_scale_set.scaleset1 /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Compute/virtualMachineScaleSets/scaleset1
-```
diff --git a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network.html.markdown
deleted file mode 100644
index 697960072..000000000
--- a/website/source/docs/providers/azurerm/r/virtual_network.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azure_virtual_network"
-sidebar_current: "docs-azurerm-resource-network-virtual-network"
-description: |-
- Creates a new virtual network including any configured subnets. Each subnet can optionally be configured with a security group to be associated with the subnet.
----
-
-# azurerm\_virtual\_network
-
-Creates a new virtual network including any configured subnets. Each subnet can
-optionally be configured with a security group to be associated with the subnet.
-
-## Example Usage
-
-```hcl
-resource "azurerm_virtual_network" "test" {
- name = "virtualNetwork1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- address_space = ["10.0.0.0/16"]
- location = "West US"
- dns_servers = ["10.0.0.4", "10.0.0.5"]
-
- subnet {
- name = "subnet1"
- address_prefix = "10.0.1.0/24"
- }
-
- subnet {
- name = "subnet2"
- address_prefix = "10.0.2.0/24"
- }
-
- subnet {
- name = "subnet3"
- address_prefix = "10.0.3.0/24"
- security_group = "${azurerm_network_security_group.test.id}"
- }
-
- tags {
- environment = "Production"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the virtual network. Changing this forces a
- new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the virtual network.
-
-* `address_space` - (Required) The address space that is used the virtual
- network. You can supply more than one address space. Changing this forces
- a new resource to be created.
-
-* `location` - (Required) The location/region where the virtual network is
- created. Changing this forces a new resource to be created.
-
-* `dns_servers` - (Optional) List of IP addresses of DNS servers
-
-* `subnet` - (Optional) Can be specified multiple times to define multiple
- subnets. Each `subnet` block supports fields documented below.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-The `subnet` block supports:
-
-* `name` - (Required) The name of the subnet.
-
-* `address_prefix` - (Required) The address prefix to use for the subnet.
-
-* `security_group` - (Optional) The Network Security Group to associate with
- the subnet. (Referenced by `id`, ie. `azurerm_network_security_group.test.id`)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The virtual NetworkConfiguration ID.
-
-* `name` - The name of the virtual network.
-
-* `resource_group_name` - The name of the resource group in which to create the virtual network.
-
-* `location` - The location/region where the virtual network is created
-
-* `address_space` - The address space that is used the virtual network.
-
-
-## Import
-
-Virtual Networks can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_virtual_network.testNetwork /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1
-```
diff --git a/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown b/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown
deleted file mode 100644
index 4376a0ced..000000000
--- a/website/source/docs/providers/azurerm/r/virtual_network_peering.html.markdown
+++ /dev/null
@@ -1,102 +0,0 @@
----
-layout: "azurerm"
-page_title: "Azure Resource Manager: azure_virtual_network_peering"
-sidebar_current: "docs-azurerm-resource-network-virtual-network-peering"
-description: |-
- Creates a new virtual network peering which allows resources to access other
- resources in the linked virtual network.
----
-
-# azurerm\_virtual\_network\_peering
-
-Creates a new virtual network peering which allows resources to access other
-resources in the linked virtual network.
-
-## Example Usage
-
-```hcl
-resource "azurerm_resource_group" "test" {
- name = "peeredvnets-rg"
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test1" {
- name = "peternetwork1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- address_space = ["10.0.1.0/24"]
- location = "West US"
-}
-
-resource "azurerm_virtual_network" "test2" {
- name = "peternetwork2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- address_space = ["10.0.2.0/24"]
- location = "West US"
-}
-
-resource "azurerm_virtual_network_peering" "test1" {
- name = "peer1to2"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test1.name}"
- remote_virtual_network_id = "${azurerm_virtual_network.test2.id}"
-}
-
-resource "azurerm_virtual_network_peering" "test2" {
- name = "peer2to1"
- resource_group_name = "${azurerm_resource_group.test.name}"
- virtual_network_name = "${azurerm_virtual_network.test2.name}"
- remote_virtual_network_id = "${azurerm_virtual_network.test1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the virtual network peering. Changing this
- forces a new resource to be created.
-
-* `virtual_network_name` - (Required) The name of the virtual network. Changing
- this forces a new resource to be created.
-
-* `remote_virtual_network_id` - (Required) The full Azure resource ID of the
- remote virtual network. Changing this forces a new resource to be created.
-
-* `resource_group_name` - (Required) The name of the resource group in which to
- create the virtual network. Changing this forces a new resource to be
- created.
-
-* `allow_virtual_network_access` - (Optional) Controls if the VMs in the remote
- virtual network can access VMs in the local virtual network. Defaults to
- false.
-
-* `allow_forwarded_traffic` - (Optional) Controls if forwarded traffic from VMs
- in the remote virtual network is allowed. Defaults to false.
-
-* `allow_gateway_transit` - (Optional) Controls gatewayLinks can be used in the
- remote virtual network’s link to the local virtual network.
-
-* `use_remote_gateways` - (Optional) Controls if remote gateways can be used on
- the local virtual network. If the flag is set to true, and
- allowGatewayTransit on the remote peering is also true, virtual network will
- use gateways of remote virtual network for transit. Only one peering can
- have this flag set to true. This flag cannot be set if virtual network
- already has a gateway. Defaults to false.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Virtual Network Peering resource ID.
-
-## Note
-
-Virtual Network peerings cannot be created, updated or deleted concurrently.
-
-## Import
-
-Virtual Network Peerings can be imported using the `resource id`, e.g.
-
-```
-terraform import azurerm_virtual_network_peering.testPeering /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/virtualNetworks/myvnet1/virtualNetworkPeerings/myvnet1peering
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/bitbucket/index.html.markdown b/website/source/docs/providers/bitbucket/index.html.markdown
deleted file mode 100644
index 638a1a6c1..000000000
--- a/website/source/docs/providers/bitbucket/index.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "bitbucket"
-page_title: "Provider: Bitbucket"
-sidebar_current: "docs-bitbucket-index"
-description: |-
- The Bitbucket provider to interact with repositories, projects, etc..
----
-
-# Bitbucket Provider
-
-The Bitbucket provider allows you to manage resources including repositories,
-webhooks, and default reviewers.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Bitbucket Provider
-provider "bitbucket" {
- username = "GobBluthe"
- password = "idoillusions" # you can also use app passwords
-}
-
-resource "bitbucket_repository" "illusions" {
- owner = "theleagueofmagicians"
- name = "illusions"
- scm = "hg"
- is_private = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `username` - (Required) Your username used to connect to bitbucket. You can
- also set this via the environment variable. `BITBUCKET_USERNAME`
-
-* `password` - (Required) Your password used to connect to bitbucket. You can
- also set this via the environment variable. `BITBUCKET_PASSWORD`
diff --git a/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown b/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown
deleted file mode 100644
index 810813e89..000000000
--- a/website/source/docs/providers/bitbucket/r/default_reviewers.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "bitbucket"
-page_title: "Bitbucket: bitbucket_default_reviewers"
-sidebar_current: "docs-bitbucket-resource-default-reviewers"
-description: |-
- Provides support for setting up default reviews for bitbucket.
----
-
-# bitbucket\_default_reviewers
-
-Provides support for setting up default reviewers for your repository.
-
-## Example Usage
-
-```hcl
-# Manage your repository
-resource "bitbucket_default_reviewers" "infrastructure" {
- owner = "myteam"
- repository = "terraform-code"
-
- reviewers = [
- "gob",
- "michael",
- "michalejr",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `owner` - (Required) The owner of this repository. Can be you or any team you
- have write access to.
-* `repository` - (Required) The name of the repository.
-* `reviewers` - (Required) A list of reviewers to use.
diff --git a/website/source/docs/providers/bitbucket/r/hook.html.markdown b/website/source/docs/providers/bitbucket/r/hook.html.markdown
deleted file mode 100644
index 7e5a54d4e..000000000
--- a/website/source/docs/providers/bitbucket/r/hook.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "bitbucket"
-page_title: "Bitbucket: bitbucket_hook"
-sidebar_current: "docs-bitbucket-resource-hook"
-description: |-
- Provides a Bitbucket Webhook
----
-
-# bitbucket\_hook
-
-Provides a Bitbucket hook resource.
-
-This allows you to manage your webhooks on a repository.
-
-## Example Usage
-
-```hcl
-# Manage your repositories hooks
-resource "bitbucket_hook" "deploy_on_push" {
- owner = "myteam"
- repository = "terraform-code"
- url = "https://mywebhookservice.mycompany.com/deploy-on-push"
- description = "Deploy the code via my webhook"
-
- events = [
- "repo:push",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `owner` - (Required) The owner of this repository. Can be you or any team you
- have write access to.
-* `repository` - (Required) The name of the repository.
-* `url` - (Required) Where to POST to.
-* `description` - (Required) The name / description to show in the UI.
-* `events` - (Required) The event you want to react on.
diff --git a/website/source/docs/providers/bitbucket/r/repository.html.markdown b/website/source/docs/providers/bitbucket/r/repository.html.markdown
deleted file mode 100644
index fcff7c53b..000000000
--- a/website/source/docs/providers/bitbucket/r/repository.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "bitbucket"
-page_title: "Bitbucket: bitbucket_repository"
-sidebar_current: "docs-bitbucket-resource-repository"
-description: |-
- Provides a Bitbucket Repository
----
-
-# bitbucket\_repository
-
-Provides a Bitbucket repository resource.
-
-This resource allows you to manage your repositories such as scm type, if it is
-private, how to fork the repository and other options.
-
-## Example Usage
-
-```hcl
-# Manage your repository
-resource "bitbucket_repository" "infrastructure" {
- owner = "myteam"
- name = "terraform-code"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `owner` - (Required) The owner of this repository. Can be you or any team you
- have write access to.
-* `name` - (Optional) The name of the repository.
-* `scm` - (Optional) What SCM you want to use. Valid options are hg or git.
- Defaults to git.
-* `is_private` - (Optional) If this should be private or not. Defaults to `true`.
-* `website` - (Optional) URL of website associated with this repository.
-* `language` - (Optional) What the language of this repository should be.
-* `has_issues` - (Optional) If this should have issues turned on or not.
-* `has_wiki` - (Optional) If this should have wiki turned on or not.
-* `project_key` - (Optional) If you want to have this repo associated with a
- project.
-* `fork_policy` - (Optional) What the fork policy should be. Defaults to
- allow_forks.
-* `description` - (Optional) What the description of the repo is.
-
-## Computed Arguments
-
-The following arguments are computed. You can access both `clone_ssh` and
-`clone_https` for getting a clone URL.
-
-## Import
-
-Repositories can be imported using the `name`, e.g.
-
-```
-$ terraform import bitbucket_repository.my-repo my-repo
-```
diff --git a/website/source/docs/providers/chef/index.html.markdown b/website/source/docs/providers/chef/index.html.markdown
deleted file mode 100644
index 8edc2d4b4..000000000
--- a/website/source/docs/providers/chef/index.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "chef"
-page_title: "Provider: Chef"
-sidebar_current: "docs-chef-index"
-description: |-
- Chef is a systems and cloud infrastructure automation framework.
----
-
-# Chef Provider
-
-[Chef](https://www.chef.io/) is a systems and cloud infrastructure automation
-framework. The Chef provider allows Terraform to manage various resources
-that exist within [Chef Server](http://docs.chef.io/chef_server.html).
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Chef provider
-provider "chef" {
- server_url = "https://api.chef.io/organizations/example/"
-
- # You can set up a "Client" within the Chef Server management console.
- client_name = "terraform"
- key_material = "${file("chef-terraform.pem")}"
-}
-
-# Create a Chef Environment
-resource "chef_environment" "production" {
- name = "production"
-}
-
-# Create a Chef Role
-resource "chef_role" "app_server" {
- name = "app_server"
-
- run_list = [
- "recipe[terraform]",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `server_url` - (Required) The HTTP(S) API URL of the Chef server to use. If
- the target Chef server supports organizations, use the full URL of the
- organization you wish to configure. May be provided instead via the
- ``CHEF_SERVER_URL`` environment variable.
-* `client_name` - (Required) The name of the client account to use when making
- requests. This must have been already configured on the Chef server.
- May be provided instead via the ``CHEF_CLIENT_NAME`` environment variable.
-* `key_material` - (Required) The PEM-formatted private key contents belonging to
- the configured client. This is issued by the server when a new client object
- is created. May be provided via the
- ``CHEF_PRIVATE_KEY_FILE`` environment variable.
-* `allow_unverified_ssl` - (Optional) Boolean indicating whether to make
- requests to a Chef server whose SSL certificate cannot be verified. Defaults
- to ``false``.
diff --git a/website/source/docs/providers/chef/r/data_bag.html.markdown b/website/source/docs/providers/chef/r/data_bag.html.markdown
deleted file mode 100644
index 75168632c..000000000
--- a/website/source/docs/providers/chef/r/data_bag.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "chef"
-page_title: "Chef: chef_data_bag"
-sidebar_current: "docs-chef-resource-data-bag"
-description: |-
- Creates and manages a data bag in Chef Server.
----
-
-# chef_data_bag
-
-A [data bag](http://docs.chef.io/data_bags.html) is a collection of
-configuration objects that are stored as JSON in Chef Server and can be
-retrieved and used in Chef recipes.
-
-This resource creates the data bag itself. Inside each data bag is a collection
-of items which can be created using the ``chef_data_bag_item`` resource.
-
-## Example Usage
-
-```hcl
-resource "chef_data_bag" "example" {
- name = "example-data-bag"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique name to assign to the data bag. This is the
- name that other server clients will use to find and retrieve data from the
- data bag.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `api_url` - The URL representing this data bag in the Chef server API.
diff --git a/website/source/docs/providers/chef/r/data_bag_item.html.markdown b/website/source/docs/providers/chef/r/data_bag_item.html.markdown
deleted file mode 100644
index 7f36e737b..000000000
--- a/website/source/docs/providers/chef/r/data_bag_item.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "chef"
-page_title: "Chef: chef_data_bag_item"
-sidebar_current: "docs-chef-resource-data-bag-item"
-description: |-
- Creates and manages an object within a data bag in Chef Server.
----
-
-# chef_data_bag_item
-
-A [data bag](http://docs.chef.io/data_bags.html) is a collection of
-configuration objects that are stored as JSON in Chef Server and can be
-retrieved and used in Chef recipes.
-
-This resource creates objects within an existing data bag. To create the
-data bag itself, use the ``chef_data_bag`` resource.
-
-## Example Usage
-
-```hcl
-resource "chef_data_bag_item" "example" {
- data_bag_name = "example-data-bag"
-
-content_json = < **NOTE regarding `circonus_collector`:** The `circonus_collector` data source
-actually queries and operates on Circonus "brokers" at the broker group level.
-The `circonus_collector` is simply a renamed Circonus "broker" to make it clear
-what the function of the "broker" actually does: act as a fan-in agent that
-either pulls or has metrics pushed into it and funneled back through Circonus.
-
-## Example Usage
-
-The following example shows how the resource might be used to obtain
-the name of the Circonus Collector configured on the provider.
-
-```hcl
-data "circonus_collector" "ashburn" {
- id = "/broker/1"
-}
-```
-
-## Argument Reference
-
-The arguments of this data source act as filters for querying the available
-regions. The given filters must match exactly one region whose data will be
-exported as attributes.
-
-* `id` - (Optional) The Circonus ID of a given collector.
-
-At least one of the above attributes should be provided when searching for a
-collector.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The Circonus ID of the selected Collector.
-
-* `details` - A list of details about the individual Collector instances that
- make up the group of collectors. See below for a list of attributes within
- each collector.
-
-* `latitude` - The latitude of the selected Collector.
-
-* `longitude` - The longitude of the selected Collector.
-
-* `name` - The name of the selected Collector.
-
-* `tags` - A list of tags assigned to the selected Collector.
-
-* `type` - The type of the selected Collector. This value is either `circonus` for a
- Circonus-managed, public Collector, or `enterprise` for a private collector that is
- private to an account.
-
-## Collector Details
-
-* `cn` - The CN of an individual Collector in the Collector Group.
-
-* `external_host` - The external host information for an individual Collector in
- the Collector Group. This is useful or important when talking with a Collector
- through a NAT'ing firewall.
-
-* `external_port` - The external port number for an individual Collector in the
- Collector Group. This is useful or important when talking with a Collector through
- a NAT'ing firewall.
-
-* `ip` - The IP address of an individual Collector in the Collector Group. This is
- the IP address of the interface listening on the network.
-
-* `min_version` - ??
-
-* `modules` - A list of what modules (types of checks) this collector supports.
-
-* `port` - The port the collector responds to the Circonus HTTPS REST wire protocol
- on.
-
-* `skew` - The clock drift between this collector and the Circonus server.
-
-* `status` - The status of this particular collector. A string containing either
- `active`, `unprovisioned`, `pending`, `provisioned`, or `retired`.
-
-* `version` - The version of the collector software the collector is running.
diff --git a/website/source/docs/providers/circonus/index.html.markdown b/website/source/docs/providers/circonus/index.html.markdown
deleted file mode 100644
index 8f2a1716e..000000000
--- a/website/source/docs/providers/circonus/index.html.markdown
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: "circonus"
-page_title: "Provider: Circonus"
-sidebar_current: "docs-circonus-index"
-description: |-
- A provider for Circonus.
----
-
-# Circonus Provider
-
-The Circonus provider gives the ability to manage a Circonus account.
-
-Use the navigation to the left to read about the available resources.
-
-## Usage
-
-```hcl
-provider "circonus" {
- key = "b8fec159-f9e5-4fe6-ad2c-dc1ec6751586"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `key` - (Required) The Circonus API Key.
-* `api_url` - (Optional) The API URL to use to talk with. The default is `https://api.circonus.com/v2`.
diff --git a/website/source/docs/providers/circonus/r/check.html.markdown b/website/source/docs/providers/circonus/r/check.html.markdown
deleted file mode 100644
index 41935da89..000000000
--- a/website/source/docs/providers/circonus/r/check.html.markdown
+++ /dev/null
@@ -1,686 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_check"
-sidebar_current: "docs-circonus-resource-circonus_check"
-description: |-
- Manages a Circonus check.
----
-
-# circonus\_check
-
-The ``circonus_check`` resource creates and manages a
-[Circonus Check](https://login.circonus.com/resources/api/calls/check_bundle).
-
-~> **NOTE regarding `circonus_check` vs a Circonus Check Bundle:** The
-`circonus_check` resource is implemented in terms of a
-[Circonus Check Bundle](https://login.circonus.com/resources/api/calls/check_bundle).
-The `circonus_check` creates a higher-level abstraction over the implementation
-of a Check Bundle. As such, the naming and structure does not map 1:1 with the
-underlying Circonus API.
-
-## Usage
-
-```hcl
-variable api_token {
- default = "my-token"
-}
-
-resource "circonus_check" "usage" {
- name = "Circonus Usage Check"
-
- notes = <<-EOF
-A check to extract a usage metric.
-EOF
-
- collector {
- id = "/broker/1"
- }
-
- metric {
- name = "${circonus_metric.used.name}"
- tags = "${circonus_metric.used.tags}"
- type = "${circonus_metric.used.type}"
- unit = "${circonus_metric.used.unit}"
- }
-
- json {
- url = "https://api.circonus.com/v2"
-
- http_headers = {
- Accept = "application/json"
- X-Circonus-App-Name = "TerraformCheck"
- X-Circonus-Auth-Token = "${var.api_token}"
- }
- }
-
- period = 60
- tags = ["source:circonus", "author:terraform"]
- timeout = 10
-}
-
-resource "circonus_metric" "used" {
- name = "_usage`0`_used"
- type = "numeric"
- unit = "qty"
-
- tags = {
- source = "circonus"
- }
-}
-```
-
-## Argument Reference
-
-* `active` - (Optional) Whether or not the check is enabled or not (default
- `true`).
-
-* `caql` - (Optional) A [Circonus Analytics Query Language
- (CAQL)](https://login.circonus.com/user/docs/CAQL) check. See below for
- details on how to configure a `caql` check.
-
-* `cloudwatch` - (Optional) A [CloudWatch
- check](https://login.circonus.com/user/docs/Data/CheckTypes/CloudWatch) check.
- See below for details on how to configure a `cloudwatch` check.
-
-* `collector` - (Required) A collector ID. The collector(s) that are
- responsible for running a `circonus_check`. The `id` can be the Circonus ID
- for a Circonus collector (a.k.a. "broker") running in the cloud or an
- enterprise collector running in your datacenter. One collection of metrics
- will be automatically created for each `collector` specified.
-
-* `consul` - (Optional) A native Consul check. See below for details on how to
- configure a `consul` check.
-
-* `http` - (Optional) A poll-based HTTP check. See below for details on how to configure
- the `http` check.
-
-* `httptrap` - (Optional) A push-based HTTP check. This check method expects
- clients to send a specially crafted HTTP JSON payload. See below for details
- on how to configure the `httptrap` check.
-
-* `icmp_ping` - (Optional) An ICMP ping check. See below for details on how to
- configure the `icmp_ping` check.
-
-* `json` - (Optional) A JSON check. See below for details on how to configure
- the `json` check.
-
-* `metric` - (Required) A list of one or more `metric` configurations. All
- metrics obtained from this check instance will be available as individual
- metric streams. See below for a list of supported `metric` attributes.
-
-* `metric_limit` - (Optional) Setting a metric limit will tell the Circonus
- backend to periodically look at the check to see if there are additional
- metrics the collector has seen that we should collect. It will not reactivate
- metrics previously collected and then marked as inactive. Values are `0` to
- disable, `-1` to enable all metrics or `N+` to collect up to the value `N`
- (both `-1` and `N+` can not exceed other account restrictions).
-
-* `mysql` - (Optional) A MySQL check. See below for details on how to configure
- the `mysql` check.
-
-* `name` - (Optional) The name of the check that will be displayed in the web
- interface.
-
-* `notes` - (Optional) Notes about this check.
-
-* `period` - (Optional) The period between each time the check is made in
- seconds.
-
-* `postgresql` - (Optional) A PostgreSQL check. See below for details on how to
- configure the `postgresql` check.
-
-* `statsd` - (Optional) A statsd check. See below for details on how to
- configure the `statsd` check.
-
-* `tags` - (Optional) A list of tags assigned to this check.
-
-* `target` - (Required) A string containing the location of the thing being
- checked. This value changes based on the check type. For example, for an
- `http` check type this would be the URL you're checking. For a DNS check it
- would be the hostname you wanted to look up.
-
-* `tcp` - (Optional) A TCP check. See below for details on how to configure the
- `tcp` check (includes TLS support).
-
-* `timeout` - (Optional) A floating point number representing the maximum number
- of seconds this check should wait for a result. Defaults to `10.0`.
-
-## Supported `metric` Attributes
-
-The following attributes are available within a `metric`.
-
-* `active` - (Optional) Whether or not the metric is active or not. Defaults to `true`.
-* `name` - (Optional) The name of the metric. A string containing freeform text.
-* `tags` - (Optional) A list of tags assigned to the metric.
-* `type` - (Required) A string containing either `numeric`, `text`, `histogram`, `composite`, or `caql`.
-* `units` - (Optional) The unit of measurement the metric represents (e.g., bytes, seconds, milliseconds). A string containing freeform text.
-
-## Supported Check Types
-
-Circonus supports a variety of different checks. Each check type has its own
-set of options that must be configured. Each check type conflicts with every
-other check type (i.e. a `circonus_check` configured for a `json` check will
-conflict with all other check types, therefore a `postgresql` check must be a
-different `circonus_check` resource).
-
-### `caql` Check Type Attributes
-
-* `query` - (Required) The [CAQL
- Query](https://login.circonus.com/user/docs/caql_reference) to run.
-
-Available metrics depend on the payload returned in the `caql` check. See the
-[`caql` check type](https://login.circonus.com/resources/api/calls/check_bundle) for
-additional details.
-
-### `cloudwatch` Check Type Attributes
-
-* `api_key` - (Required) The AWS access key. If this value is not explicitly
- set, this value is populated by the environment variable `AWS_ACCESS_KEY_ID`.
-
-* `api_secret` - (Required) The AWS secret key. If this value is not explicitly
- set, this value is populated by the environment variable `AWS_SECRET_ACCESS_KEY`.
-
-* `dimmensions` - (Required) A map of the CloudWatch dimmensions to include in
- the check.
-
-* `metric` - (Required) A list of metric names to collect in this check.
-
-* `namespace` - (Required) The namespace to pull parameters from.
-
-* `url` - (Required) The AWS URL to pull from. This should be set to the
- region-specific endpoint (e.g. prefer
- `https://monitoring.us-east-1.amazonaws.com` over
- `https://monitoring.amazonaws.com`).
-
-* `version` - (Optional) The version of the Cloudwatch API to use. Defaults to
- `2010-08-01`.
-
-Available metrics depend on the payload returned in the `cloudwatch` check. See the
-[`cloudwatch` check type](https://login.circonus.com/resources/api/calls/check_bundle) for
-additional details. The `circonus_check` `period` attribute must be set to
-either `60s` or `300s` for CloudWatch metrics.
-
-Example CloudWatch check (partial metrics collection):
-
-```hcl
-variable "cloudwatch_rds_tags" {
- type = "list"
- default = [
- "app:postgresql",
- "app:rds",
- "source:cloudwatch",
- ]
-}
-
-resource "circonus_check" "rds_metrics" {
- active = true
- name = "Terraform test: RDS Metrics via CloudWatch"
- notes = "Collect RDS metrics"
- period = "60s"
-
- collector {
- id = "/broker/1"
- }
-
- cloudwatch {
- dimmensions = {
- DBInstanceIdentifier = "my-db-name",
- }
-
- metric = [
- "CPUUtilization",
- "DatabaseConnections",
- ]
-
- namespace = "AWS/RDS"
- url = "https://monitoring.us-east-1.amazonaws.com"
- }
-
- metric {
- name = "CPUUtilization"
- tags = [ "${var.cloudwatch_rds_tags}" ]
- type = "numeric"
- unit = "%"
- }
-
- metric {
- name = "DatabaseConnections"
- tags = [ "${var.cloudwatch_rds_tags}" ]
- type = "numeric"
- unit = "connections"
- }
-}
-```
-
-### `consul` Check Type Attributes
-
-* `acl_token` - (Optional) An ACL Token authenticate the API request. When an
- ACL Token is set, this value is transmitted as an HTTP Header in order to not
- show up in any logs. The default value is an empty string.
-
-* `allow_stale` - (Optional) A boolean value that indicates whether or not this
- check should require the health information come from the Consul leader node.
- For scalability reasons, this value defaults to `false`. See below for
- details on detecting the staleness of health information.
-
-* `ca_chain` - (Optional) A path to a file containing all the certificate
- authorities that should be loaded to validate the remote certificate (required
- when `http_addr` is a TLS-enabled endpoint).
-
-* `certificate_file` - (Optional) A path to a file containing the client
- certificate that will be presented to the remote server (required when
- `http_addr` is a TLS-enabled endpoint).
-
-* `check_blacklist` - (Optional) A list of check names to exclude from the
- result of checks (i.e. no metrics will be generated by whose check name is in
- the `check_blacklist`). This blacklist is applied to the `node`,
- `service`, and `state` check modes.
-
-* `ciphers` - (Optional) A list of ciphers to be used in the TLS protocol
- (only used when `http_addr` is a TLS-enabled endpoint).
-
-* `dc` - (Optional) Explicitly name the Consul datacenter to use. The default
- value is an empty string. When an empty value is specified, the Consul
- datacenter of the agent at the `http_addr` is implicitly used.
-
-* `headers` - (Optional) A map of the HTTP headers to be sent when executing the
- check. NOTE: the `headers` attribute is processed last and will take
- precedence over any other derived value that is transmitted as an HTTP header
- to Consul (i.e. it is possible to override the `acl_token` by setting a
- headers value).
-
-* `http_addr` - (Optional) The Consul HTTP endpoint to query for health
- information. The default value is `http://consul.service.consul:8500`. The
- scheme must change from `http` to `https` when the endpoint has been
- TLS-enabled.
-
-* `key_file` - (Optional) A path to a file containing key to be used in
- conjunction with the client certificate (required when `http_addr` is a
- TLS-enabled endpoint).
-
-* `node` - (Optional) Check the health of this node. The value can be either a
- Consul Node ID (Consul Version >= 0.7.4) or Node Name. See also the
- `service_blacklist`, `node_blacklist`, and `check_blacklist` attributes. This
- attribute conflicts with the `service` and `state` attributes.
-
-* `node_blacklist` - (Optional) A list of node IDs or node names to exclude from
- the results of checks (i.e. no metrics will be generated from nodes in the
- `node_blacklist`). This blacklist is applied to the `node`, `service`, and
- `state` check modes.
-
-* `service` - (Optional) Check the cluster-wide health of this named service.
- See also the `service_blacklist`, `node_blacklist`, and `check_blacklist`
- attributes. This attribute conflicts with the `node` and `state` attributes.
-
-* `service_blacklist` - (Optional) A list of service names to exclude from the
- result of checks (i.e. no metrics will be generated by services whose service
- name is in the `service_blacklist`). This blacklist is applied to the `node`,
- `service`, and `state` check modes.
-
-* `state` - (Optional) A Circonus check to monitor Consul checks across the
- entire Consul cluster. This value may be either `passing`, `warning`, or
- `critical`. This `consul` check mode is intended to act as the cluster check
- of last resort. This check type is useful when first starting and is intended
- to act as a check of last resort before transitioning to explicitly defined
- checks for individual services or nodes. The metrics returned from check will
- be sorted based on the `CreateIndex` of the entry in order to have a stable
- set of metrics in the array of returned values. See also the
- `service_blacklist`, `node_blacklist`, and `check_blacklist` attributes. This
- attribute conflicts with the `node` and `state` attributes.
-
-Available metrics depend on the consul check being performed (`node`, `service`,
-or `state`). In addition to the data available from the endpoints, the `consul`
-check also returns a set of metrics that are a variant of:
-`{Num,Pct}{,Passing,Warning,Critical}{Checks,Nodes,Services}` (see the
-`GLOB_BRACE` section of your local `glob(3)` documentation).
-
-Example Consul check (partial metrics collection):
-
-```hcl
-resource "circonus_check" "consul_server" {
- active = true
- name = "%s"
- period = "60s"
-
- collector {
- # Collector ID must be an Enterprise broker able to reach the Consul agent
- # listed in `http_addr`.
- id = "/broker/2110"
- }
-
- consul {
- service = "consul"
-
- # Other consul check modes:
- # node = "consul1"
- # state = "critical"
- }
-
- metric {
- name = "NumNodes"
- tags = [ "source:consul", "lifecycle:unittest" ]
- type = "numeric"
- }
-
- metric {
- name = "LastContact"
- tags = [ "source:consul", "lifecycle:unittest" ]
- type = "numeric"
- unit = "seconds"
- }
-
- metric {
- name = "Index"
- tags = [ "source:consul", "lifecycle:unittest" ]
- type = "numeric"
- unit = "transactions"
- }
-
- metric {
- name = "KnownLeader"
- tags = [ "source:consul", "lifecycle:unittest" ]
- type = "text"
- }
-
- tags = [ "source:consul", "lifecycle:unittest" ]
-}
-```
-
-### `http` Check Type Attributes
-
-* `auth_method` - (Optional) HTTP Authentication method to use. When set must
- be one of the values `Basic`, `Digest`, or `Auto`.
-
-* `auth_password` - (Optional) The password to use during authentication.
-
-* `auth_user` - (Optional) The user to authenticate as.
-
-* `body_regexp` - (Optional) This regular expression is matched against the body
- of the response. If a match is not found, the check will be marked as "bad."
-
-* `ca_chain` - (Optional) A path to a file containing all the certificate
- authorities that should be loaded to validate the remote certificate (for TLS
- checks).
-
-* `certificate_file` - (Optional) A path to a file containing the client
- certificate that will be presented to the remote server (for TLS checks).
-
-* `ciphers` - (Optional) A list of ciphers to be used in the TLS protocol (for
- HTTPS checks).
-
-* `code` - (Optional) The HTTP code that is expected. If the code received does
- not match this regular expression, the check is marked as "bad."
-
-* `extract` - (Optional) This regular expression is matched against the body of
- the response globally. The first capturing match is the key and the second
- capturing match is the value. Each key/value extracted is registered as a
- metric for the check.
-
-* `headers` - (Optional) A map of the HTTP headers to be sent when executing the
- check.
-
-* `key_file` - (Optional) A path to a file containing key to be used in
- conjunction with the client certificate (for TLS checks).
-
-* `method` - (Optional) The HTTP Method to use. Defaults to `GET`.
-
-* `payload` - (Optional) The information transferred as the payload of an HTTP
- request.
-
-* `read_limit` - (Optional) Sets an approximate limit on the data read (`0`
- means no limit). Default `0`.
-
-* `redirects` - (Optional) The maximum number of HTTP `Location` header
- redirects to follow. Default `0`.
-
-* `url` - (Required) The target for this `json` check. The `url` must include
- the scheme, host, port (optional), and path to use
- (e.g. `https://app1.example.org/healthz`)
-
-* `version` - (Optional) The HTTP version to use. Defaults to `1.1`.
-
-Available metrics include: `body_match`, `bytes`, `cert_end`, `cert_end_in`,
-`cert_error`, `cert_issuer`, `cert_start`, `cert_subject`, `code`, `duration`,
-`truncated`, `tt_connect`, and `tt_firstbyte`. See the
-[`http` check type](https://login.circonus.com/resources/api/calls/check_bundle) for
-additional details.
-
-### `httptrap` Check Type Attributes
-
-* `async_metrics` - (Optional) Boolean value specifies whether or not httptrap
- metrics are logged immediately or held until the status message is to be
- emitted. Default `false`.
-
-* `secret` - (Optional) Specify the secret with which metrics may be
- submitted.
-
-Available metrics depend on the payload returned in the `httptrap` doc. See
-the [`httptrap` check type](https://login.circonus.com/resources/api/calls/check_bundle)
-for additional details.
-
-### `json` Check Type Attributes
-
-* `auth_method` - (Optional) HTTP Authentication method to use. When set must
- be one of the values `Basic`, `Digest`, or `Auto`.
-
-* `auth_password` - (Optional) The password to use during authentication.
-
-* `auth_user` - (Optional) The user to authenticate as.
-
-* `ca_chain` - (Optional) A path to a file containing all the certificate
- authorities that should be loaded to validate the remote certificate (for TLS
- checks).
-
-* `certificate_file` - (Optional) A path to a file containing the client
- certificate that will be presented to the remote server (for TLS checks).
-
-* `ciphers` - (Optional) A list of ciphers to be used in the TLS protocol (for
- HTTPS checks).
-
-* `headers` - (Optional) A map of the HTTP headers to be sent when executing the
- check.
-
-* `key_file` - (Optional) A path to a file containing key to be used in
- conjunction with the client certificate (for TLS checks).
-
-* `method` - (Optional) The HTTP Method to use. Defaults to `GET`.
-
-* `port` - (Optional) The TCP Port number to use. Defaults to `81`.
-
-* `read_limit` - (Optional) Sets an approximate limit on the data read (`0`
- means no limit). Default `0`.
-
-* `redirects` - (Optional) The maximum number of HTTP `Location` header
- redirects to follow. Default `0`.
-
-* `url` - (Required) The target for this `json` check. The `url` must include
- the scheme, host, port (optional), and path to use
- (e.g. `https://app1.example.org/healthz`)
-
-* `version` - (Optional) The HTTP version to use. Defaults to `1.1`.
-
-Available metrics depend on the payload returned in the `json` doc. See the
-[`json` check type](https://login.circonus.com/resources/api/calls/check_bundle) for
-additional details.
-
-### `icmp_ping` Check Type Attributes
-
-The `icmp_ping` check requires the `target` top-level attribute to be set.
-
-* `availability` - (Optional) The percentage of ping packets that must be
- returned for this measurement to be considered successful. Defaults to
- `100.0`.
-* `count` - (Optional) The number of ICMP ping packets to send. Defaults to
- `5`.
-* `interval` - (Optional) Interval between packets. Defaults to `2s`.
-
-Available metrics include: `available`, `average`, `count`, `maximum`, and
-`minimum`. See the
-[`ping_icmp` check type](https://login.circonus.com/resources/api/calls/check_bundle)
-for additional details.
-
-### `mysql` Check Type Attributes
-
-The `mysql` check requires the `target` top-level attribute to be set.
-
-* `dsn` - (Required) The [MySQL DSN/connect
- string](https://github.com/go-sql-driver/mysql/blob/master/README.md) to
- use to talk to MySQL.
-* `query` - (Required) The SQL query to execute.
-
-### `postgresql` Check Type Attributes
-
-The `postgresql` check requires the `target` top-level attribute to be set.
-
-* `dsn` - (Required) The [PostgreSQL DSN/connect
- string](https://www.postgresql.org/docs/current/static/libpq-connect.html) to
- use to talk to PostgreSQL.
-* `query` - (Required) The SQL query to execute.
-
-Available metric names are dependent on the output of the `query` being run.
-
-### `statsd` Check Type Attributes
-
-* `source_ip` - (Required) Any statsd messages from this IP address (IPv4 or
- IPv6) will be associated with this check.
-
-Available metrics depend on the metrics sent to the `statsd` check.
-
-### `tcp` Check Type Attributes
-
-* `banner_regexp` - (Optional) This regular expression is matched against the
- response banner. If a match is not found, the check will be marked as bad.
-
-* `ca_chain` - (Optional) A path to a file containing all the certificate
- authorities that should be loaded to validate the remote certificate (for TLS
- checks).
-
-* `certificate_file` - (Optional) A path to a file containing the client
- certificate that will be presented to the remote server (for TLS checks).
-
-* `ciphers` - (Optional) A list of ciphers to be used in the TLS protocol (for
- HTTPS checks).
-
-* `host` - (Required) Hostname or IP address of the host to connect to.
-
-* `key_file` - (Optional) A path to a file containing key to be used in
- conjunction with the client certificate (for TLS checks).
-
-* `port` - (Required) Integer specifying the port on which the management
- interface can be reached.
-
-* `tls` - (Optional) When enabled establish a TLS connection.
-
-Available metrics include: `banner`, `banner_match`, `cert_end`, `cert_end_in`,
-`cert_error`, `cert_issuer`, `cert_start`, `cert_subject`, `duration`,
-`tt_connect`, `tt_firstbyte`. See the
-[`tcp` check type](https://login.circonus.com/resources/api/calls/check_bundle)
-for additional details.
-
-Sample `tcp` check:
-
-```hcl
-resource "circonus_check" "tcp_check" {
- name = "TCP and TLS check"
- notes = "Obtains the connect time and TTL for the TLS cert"
- period = "60s"
-
- collector {
- id = "/broker/1"
- }
-
- tcp {
- host = "127.0.0.1"
- port = 443
- tls = true
- }
-
- metric {
- name = "cert_end_in"
- tags = [ "${var.tcp_check_tags}" ]
- type = "numeric"
- unit = "seconds"
- }
-
- metric {
- name = "tt_connect"
- tags = [ "${var.tcp_check_tags}" ]
- type = "numeric"
- unit = "milliseconds"
- }
-
- tags = [ "${var.tcp_check_tags}" ]
-}
-```
-
-## Out Parameters
-
-* `check_by_collector` - Maps the ID of the collector (`collector_id`, the map
- key) to the `check_id` (value) that is registered to a collector.
-
-* `check_id` - If there is only one `collector` specified for the check, this
- value will be populated with the `check_id`. If more than one `collector` is
- specified in the check, then this value will be an empty string.
- `check_by_collector` will always be populated.
-
-* `checks` - List of `check_id`s created by this `circonus_check`. There is one
- element in this list per collector specified in the check.
-
-* `created` - UNIX time at which this check was created.
-
-* `last_modified` - UNIX time at which this check was last modified.
-
-* `last_modified_by` - User ID in Circonus who modified this check last.
-
-* `reverse_connect_urls` - Only relevant to Circonus support.
-
-* `uuids` - List of Check `uuid`s created by this `circonus_check`. There is
- one element in this list per collector specified in the check.
-
-## Import Example
-
-`circonus_check` supports importing resources. Supposing the following
-Terraform (and that the referenced [`circonus_metric`](metric.html) has already
-been imported):
-
-```hcl
-provider "circonus" {
- alias = "b8fec159-f9e5-4fe6-ad2c-dc1ec6751586"
-}
-
-resource "circonus_metric" "used" {
- name = "_usage`0`_used"
- type = "numeric"
-}
-
-resource "circonus_check" "usage" {
- collector {
- id = "/broker/1"
- }
-
- json {
- url = "https://api.circonus.com/account/current"
-
- http_headers = {
- "Accept" = "application/json"
- "X-Circonus-App-Name" = "TerraformCheck"
- "X-Circonus-Auth-Token" = "${var.api_token}"
- }
- }
-
- metric {
- name = "${circonus_metric.used.name}"
- type = "${circonus_metric.used.type}"
- }
-}
-```
-
-It is possible to import a `circonus_check` resource with the following command:
-
-```
-$ terraform import circonus_check.usage ID
-```
-
-Where `ID` is the `_cid` or Circonus ID of the Check Bundle
-(e.g. `/check_bundle/12345`) and `circonus_check.usage` is the name of the
-resource whose state will be populated as a result of the command.
diff --git a/website/source/docs/providers/circonus/r/contact_group.html.markdown b/website/source/docs/providers/circonus/r/contact_group.html.markdown
deleted file mode 100644
index a12c599e5..000000000
--- a/website/source/docs/providers/circonus/r/contact_group.html.markdown
+++ /dev/null
@@ -1,289 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_contact_group"
-sidebar_current: "docs-circonus-resource-circonus_contact_group"
-description: |-
- Manages a Circonus Contact Group.
----
-
-# circonus\_contact_group
-
-The ``circonus_contact_group`` resource creates and manages a
-[Circonus Contact Group](https://login.circonus.com/user/docs/Alerting/ContactGroups).
-
-
-## Usage
-
-```hcl
-resource "circonus_contact_group" "myteam-alerts" {
- name = "MyTeam Alerts"
-
- email {
- user = "/user/1234"
- }
-
- email {
- user = "/user/5678"
- }
-
- email {
- address = "user@example.com"
- }
-
- http {
- address = "https://www.example.org/post/endpoint"
- format = "json"
- method = "POST"
- }
-
- irc {
- user = "/user/6331"
- }
-
- slack {
- channel = "#myteam"
- team = "T038UT13D"
- }
-
- sms {
- user = "/user/1234"
- }
-
- sms {
- address = "8005551212"
- }
-
- victorops {
- api_key = "xxxx"
- critical = 2
- info = 5
- team = "myteam"
- warning = 3
- }
-
- xmpp {
- user = "/user/9876"
- }
-
- aggregation_window = "5m"
-
- alert_option {
- severity = 1
- reminder = "5m"
- escalate_to = "/contact_group/4444"
- }
-
- alert_option {
- severity = 2
- reminder = "15m"
- escalate_after = "2h"
- escalate_to = "/contact_group/4444"
- }
-
- alert_option {
- severity = 3
- reminder = "24m"
- escalate_after = "3d"
- escalate_to = "/contact_group/4444"
- }
-}
-```
-
-## Argument Reference
-
-* `aggregation_window` - (Optional) The aggregation window for batching up alert
- notifications.
-
-* `alert_option` - (Optional) There is one `alert_option` per severity, where
- severity can be any number between 1 (high) and 5 (low). If configured, the
- alerting system will remind or escalate alerts to further contact groups if an
- alert sent to this contact group is not acknowledged or resolved. See below
- for details.
-
-* `email` - (Optional) Zero or more `email` attributes may be present to
- dispatch email to Circonus users by referencing their user ID, or by
- specifying an email address. See below for details on supported attributes.
-
-* `http` - (Optional) Zero or more `http` attributes may be present to dispatch
- [Webhook/HTTP requests](https://login.circonus.com/user/docs/Alerting/ContactGroups#WebhookNotifications)
- by Circonus. See below for details on supported attributes.
-
-* `irc` - (Optional) Zero or more `irc` attributes may be present to dispatch
- IRC notifications to users. See below for details on supported attributes.
-
-* `long_message` - (Optional) The bulk of the message used in long form alert
- messages.
-
-* `long_subject` - (Optional) The subject used in long form alert messages.
-
-* `long_summary` - (Optional) The brief summary used in long form alert messages.
-
-* `name` - (Required) The name of the contact group.
-
-* `pager_duty` - (Optional) Zero or more `pager_duty` attributes may be present
- to dispatch to
- [Pager Duty teams](https://login.circonus.com/user/docs/Alerting/ContactGroups#PagerDutyOptions).
- See below for details on supported attributes.
-
-* `short_message` - (Optional) The subject used in short form alert messages.
-
-* `short_summary` - (Optional) The brief summary used in short form alert
- messages.
-
-* `slack` - (Optional) Zero or more `slack` attributes may be present to
- dispatch to Slack teams. See below for details on supported attributes.
-
-* `sms` - (Optional) Zero or more `sms` attributes may be present to dispatch
- SMS messages to Circonus users by referencing their user ID, or by specifying
- an SMS Phone Number. See below for details on supported attributes.
-
-* `tags` - (Optional) A list of tags attached to the Contact Group.
-
-* `victorops` - (Optional) Zero or more `victorops` attributes may be present
- to dispatch to
- [VictorOps teams](https://login.circonus.com/user/docs/Alerting/ContactGroups#VictorOps).
- See below for details on supported attributes.
-
-## Supported Contact Group `alert_option` Attributes
-
-* `escalate_after` - (Optional) How long to wait before escalating an alert that
- is received at a given severity.
-
-* `escalate_to` - (Optional) The Contact Group ID who will receive the
- escalation.
-
-* `reminder` - (Optional) If specified, reminders will be sent after a user
- configurable number of minutes for open alerts.
-
-* `severity` - (Required) An `alert_option` must be assigned to a given severity
- level. Valid severity levels range from 1 (highest severity) to 5 (lowest
- severity).
-
-## Supported Contact Group `email` Attributes
-
-Either an `address` or `user` attribute is required.
-
-* `address` - (Optional) A well formed email address.
-
-* `user` - (Optional) An email will be sent to the email address of record for
- the corresponding user ID (e.g. `/user/1234`).
-
-A `user`'s email address is automatically maintained and kept up to date by the
-recipient, whereas an `address` provides no automatic layer of indirection for
-keeping the information accurate (including LDAP and SAML-based authentication
-mechanisms).
-
-## Supported Contact Group `http` Attributes
-
-* `address` - (Required) URL to send a webhook request to.
-
-* `format` - (Optional) The payload of the request is a JSON-encoded payload
- when the `format` is set to `json` (the default). The alternate payload
- encoding is `params`.
-
-* `method` - (Optional) The HTTP verb to use when making a request. Either
- `GET` or `POST` may be specified. The default verb is `POST`.
-
-## Supported Contact Group `irc` Attributes
-
-* `user` - (Required) When a user has configured IRC on their user account, they
- will receive an IRC notification.
-
-## Supported Contact Group `pager_duty` Attributes
-
-* `contact_group_fallback` - (Optional) If there is a problem contacting
- PagerDuty, relay the notification automatically to the specified Contact Group
- (e.g. `/contact_group/1234`).
-
-* `service_key` - (Required) The PagerDuty Service Key.
-
-* `webhook_url` - (Required) The PagerDuty webhook URL that PagerDuty uses to
- notify Circonus of acknowledged actions.
-
-## Supported Contact Group `slack` Attributes
-
-* `contact_group_fallback` - (Optional) If there is a problem contacting Slack,
- relay the notification automatically to the specified Contact Group
- (e.g. `/contact_group/1234`).
-
-* `buttons` - (Optional) Slack notifications can have acknowledgement buttons
- built into the notification message itself when enabled. Defaults to `true`.
-
-* `channel` - (Required) Specify what Slack channel Circonus should send alerts
- to.
-
-* `team` - (Required) Specify what Slack team Circonus should look in for the
- aforementioned `channel`.
-
-* `username` - (Optional) Specify the username Circonus should advertise itself
- as in Slack. Defaults to `Circonus`.
-
-## Supported Contact Group `sms` Attributes
-
-Either an `address` or `user` attribute is required.
-
-* `address` - (Optional) SMS Phone Number to send a short notification to.
-
-* `user` - (Optional) An SMS page will be sent to the phone number of record for
- the corresponding user ID (e.g. `/user/1234`).
-
-A `user`'s phone number is automatically maintained and kept up to date by the
-recipient, whereas an `address` provides no automatic layer of indirection for
-keeping the information accurate (including LDAP and SAML-based authentication
-mechanisms).
-
-## Supported Contact Group `victorops` Attributes
-
-* `contact_group_fallback` - (Optional) If there is a problem contacting
- VictorOps, relay the notification automatically to the specified Contact Group
- (e.g. `/contact_group/1234`).
-
-* `api_key` - (Required) The API Key for talking with VictorOps.
-
-* `critical` - (Required)
-* `info` - (Required)
-* `team` - (Required)
-* `warning` - (Required)
-
-## Supported Contact Group `xmpp` Attributes
-
-Either an `address` or `user` attribute is required.
-
-* `address` - (Optional) XMPP address to send a short notification to.
-
-* `user` - (Optional) An XMPP notification will be sent to the XMPP address of
- record for the corresponding user ID (e.g. `/user/1234`).
-
-## Import Example
-
-`circonus_contact_group` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-provider "circonus" {
- alias = "b8fec159-f9e5-4fe6-ad2c-dc1ec6751586"
-}
-
-resource "circonus_contact_group" "myteam" {
- name = "My Team's Contact Group"
-
- email {
- address = "myteam@example.com"
- }
-
- slack {
- channel = "#myteam"
- team = "T024UT03C"
- }
-}
-```
-
-It is possible to import a `circonus_contact_group` resource with the following command:
-
-```
-$ terraform import circonus_contact_group.myteam ID
-```
-
-Where `ID` is the `_cid` or Circonus ID of the Contact Group
-(e.g. `/contact_group/12345`) and `circonus_contact_group.myteam` is the name of
-the resource whose state will be populated as a result of the command.
diff --git a/website/source/docs/providers/circonus/r/graph.html.markdown b/website/source/docs/providers/circonus/r/graph.html.markdown
deleted file mode 100644
index 47d2b5b80..000000000
--- a/website/source/docs/providers/circonus/r/graph.html.markdown
+++ /dev/null
@@ -1,179 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_graph"
-sidebar_current: "docs-circonus-resource-circonus_graph"
-description: |-
- Manages a Circonus graph.
----
-
-# circonus\_graph
-
-The ``circonus_graph`` resource creates and manages a
-[Circonus Graph](https://login.circonus.com/user/docs/Visualization/Graph/Create).
-
-See the [Circonus graph API](https://login.circonus.com/resources/api/calls/graph)
-for additional details.
-
-## Usage
-
-```hcl
-variable "myapp-tags" {
- type = "list"
- default = [ "app:myapp", "owner:myteam" ]
-}
-
-resource "circonus_graph" "latency-graph" {
- name = "Latency Graph"
- description = "A sample graph showing off two data points"
- notes = "Misc notes about this graph"
- graph_style = "line"
- line_style = "stepped"
-
- metric {
- check = "${circonus_check.api_latency.checks[0]}"
- metric_name = "maximum"
- metric_type = "numeric"
- name = "Maximum Latency"
- axis = "left"
- color = "#657aa6"
- }
-
- metric {
- check = "${circonus_check.api_latency.checks[0]}"
- metric_name = "minimum"
- metric_type = "numeric"
- name = "Minimum Latency"
- axis = "right"
- color = "#0000ff"
- }
-
- tags = [ "${var.myapp-tags}" ]
-}
-```
-
-## Argument Reference
-
-* `description` - (Optional) Description of what the graph is for.
-
-* `graph_style` - (Optional) How the graph should be rendered. Valid options
- are `area` or `line` (default).
-
-* `left` - (Optional) A map of graph left axis options. Valid values in `left`
- include: `logarithmic` can be set to `0` (default) or `1`; `min` is the `min`
- Y axis value on the left; and `max` is the Y axis max value on the left.
-
-* `line_style` - (Optional) How the line should change between points. Can be
- either `stepped` (default) or `interpolated`.
-
-* `name` - (Required) The title of the graph.
-
-* `notes` - (Optional) A place for storing notes about this graph.
-
-* `right` - (Optional) A map of graph right axis options. Valid values in
- `right` include: `logarithmic` can be set to `0` (default) or `1`; `min` is
- the `min` Y axis value on the right; and `max` is the Y axis max value on the
- right.
-
-* `metric` - (Optional) A list of metric streams to graph. See below for
- options.
-
-* `metric_cluster` - (Optional) A metric cluster to graph. See below for options.
-
-* `tags` - (Optional) A list of tags assigned to this graph.
-
-## `metric` Configuration
-
-An individual metric stream is the underlying source of data points used for
-visualization in a graph. Either a `caql` attribute is required or a `check` and
-`metric` must be set. The `metric` attribute can have the following options
-set.
-
-* `active` - (Optional) A boolean if the metric stream is enabled or not.
-
-* `alpha` - (Optional) A floating point number between 0 and 1.
-
-* `axis` - (Optional) The axis that the metric stream will use. Valid options
- are `left` (default) or `right`.
-
-* `caql` - (Optional) A CAQL formula. Conflicts with the `check` and `metric`
- attributes.
-
-* `check` - (Optional) The check that this metric stream belongs to.
-
-* `color` - (Optional) A hex-encoded color of the line / area on the graph.
-
-* `formula` - (Optional) Formula that should be applied to both the values in the
- graph and the legend.
-
-* `legend_formula` - (Optional) Formula that should be applied to values in the
- legend.
-
-* `function` - (Optional) What derivative value, if any, should be used. Valid
- values are: `gauge` (default), `derive`, and `counter (_stddev)`
-
-* `metric_type` - (Required) The type of the metric. Valid values are:
- `numeric`, `text`, `histogram`, `composite`, or `caql`.
-
-* `name` - (Optional) A name which will appear in the graph legend.
-
-* `metric_name` - (Optional) The name of the metric stream within the check to
- graph.
-
-* `stack` - (Optional) If this metric is to be stacked, which stack set does it
- belong to (starting at `0`).
-
-## `metric_cluster` Configuration
-
-A metric cluster selects multiple metric streams together dynamically using a
-query language and returns the set of matching metric streams as a single result
-set to the graph rendering engine.
-
-* `active` - (Optional) A boolean if the metric cluster is enabled or not.
-
-* `aggregate` - (Optional) The aggregate function to apply across this metric
- cluster to create a single value. Valid values are: `none` (default), `min`,
- `max`, `sum`, `mean`, or `geometric_mean`.
-
-* `axis` - (Optional) The axis that the metric cluster will use. Valid options
- are `left` (default) or `right`.
-
-* `color` - (Optional) A hex-encoded color of the line / area on the graph.
- This is a required attribute when `aggregate` is specified.
-
-* `group` - (Optional) The `metric_cluster` that will provide datapoints for this
- graph.
-
-* `name` - (Optional) A name which will appear in the graph legend for this
- metric cluster.
-
-## Import Example
-
-`circonus_graph` supports importing resources. Supposing the following
-Terraform (and that the referenced [`circonus_metric`](metric.html)
-and [`circonus_check`](check.html) have already been imported):
-
-```text
-resource "circonus_graph" "icmp-graph" {
- name = "Test graph"
- graph_style = "line"
- line_style = "stepped"
-
- metric {
- check = "${circonus_check.api_latency.checks[0]}"
- metric_name = "maximum"
- metric_type = "numeric"
- name = "Maximum Latency"
- axis = "left"
- }
-}
-```
-
-It is possible to import a `circonus_graph` resource with the following command:
-
-```
-$ terraform import circonus_graph.icmp-graph ID
-```
-
-Where `ID` is the `_cid` or Circonus ID of the graph
-(e.g. `/graph/bd72aabc-90b9-4039-cc30-c9ab838c18f5`) and
-`circonus_graph.icmp-graph` is the name of the resource whose state will be
-populated as a result of the command.
diff --git a/website/source/docs/providers/circonus/r/metric.html.markdown b/website/source/docs/providers/circonus/r/metric.html.markdown
deleted file mode 100644
index be279ca23..000000000
--- a/website/source/docs/providers/circonus/r/metric.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_metric"
-sidebar_current: "docs-circonus-resource-circonus_metric"
-description: |-
- Manages a Circonus metric.
----
-
-# circonus\_metric
-
-The ``circonus_metric`` resource creates and manages a
-single [metric resource](https://login.circonus.com/resources/api/calls/metric)
-that will be instantiated only once a referencing `circonus_check` has been
-created.
-
-## Usage
-
-```hcl
-resource "circonus_metric" "used" {
- name = "_usage`0`_used"
- type = "numeric"
- units = "qty"
-
- tags = {
- author = "terraform"
- source = "circonus"
- }
-}
-```
-
-## Argument Reference
-
-* `active` - (Optional) A boolean indicating if the metric is being filtered out
- at the `circonus_check`'s collector(s) or not.
-
-* `name` - (Required) The name of the metric. A `name` must be unique within a
- `circonus_check` and its meaning is `circonus_check.type` specific.
-
-* `tags` - (Optional) A list of tags assigned to the metric.
-
-* `type` - (Required) The type of metric. This value must be present and can be
- one of the following values: `numeric`, `text`, `histogram`, `composite`, or
- `caql`.
-
-* `unit` - (Optional) The unit of measurement for this `circonus_metric`.
-
-## Import Example
-
-`circonus_metric` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-provider "circonus" {
- alias = "b8fec159-f9e5-4fe6-ad2c-dc1ec6751586"
-}
-
-resource "circonus_metric" "usage" {
- name = "_usage`0`_used"
- type = "numeric"
- unit = "qty"
- tags = { source = "circonus" }
-}
-```
-
-It is possible to import a `circonus_metric` resource with the following command:
-
-```
-$ terraform import circonus_metric.usage ID
-```
-
-Where `ID` is a random, never before used UUID and `circonus_metric.usage` is
-the name of the resource whose state will be populated as a result of the
-command.
diff --git a/website/source/docs/providers/circonus/r/metric_cluster.html.markdown b/website/source/docs/providers/circonus/r/metric_cluster.html.markdown
deleted file mode 100644
index e6ca78fb4..000000000
--- a/website/source/docs/providers/circonus/r/metric_cluster.html.markdown
+++ /dev/null
@@ -1,86 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_metric_cluster"
-sidebar_current: "docs-circonus-resource-circonus_metric_cluster"
-description: |-
- Manages a Circonus Metric Cluster.
----
-
-# circonus\_metric\_cluster
-
-The ``circonus_metric_cluster`` resource creates and manages a
-[Circonus Metric Cluster](https://login.circonus.com/user/docs/Data/View/MetricClusters).
-
-## Usage
-
-```hcl
-resource "circonus_metric_cluster" "nomad-job-memory-rss" {
- name = "My Job's Resident Memory"
- description = <<-EOF
-An aggregation of all resident memory metric streams across allocations in a Nomad job.
-EOF
-
- query {
- definition = "*`nomad-jobname`memory`rss"
- type = "average"
- }
- tags = ["source:nomad","resource:memory"]
-}
-```
-
-## Argument Reference
-
-* `description` - (Optional) A long-form description of the metric cluster.
-
-* `name` - (Required) The name of the metric cluster. This name must be unique
- across all metric clusters in a given Circonus Account.
-
-* `query` - (Required) One or more `query` attributes must be present. Each
- `query` must contain both a `definition` and a `type`. See below for details
- on supported attributes.
-
-* `tags` - (Optional) A list of tags attached to the metric cluster.
-
-## Supported Metric Cluster `query` Attributes
-
-* `definition` - (Required) The definition of a metric cluster [query](https://login.circonus.com/resources/api/calls/metric_cluster).
-
-* `type` - (Required) The query type to execute per metric cluster. Valid query
- types are: `average`, `count`, `counter`, `counter2`, `counter2_stddev`,
- `counter_stddev`, `derive`, `derive2`, `derive2_stddev`, `derive_stddev`,
- `histogram`, `stddev`, `text`.
-
-## Out parameters
-
-* `id` - ID of the Metric Cluster.
-
-## Import Example
-
-`circonus_metric_cluster` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-provider "circonus" {
- alias = "b8fec159-f9e5-4fe6-ad2c-dc1ec6751586"
-}
-
-resource "circonus_metric_cluster" "mymetriccluster" {
- name = "Metric Cluster for a particular metric in a job"
-
- query {
- definition = "*`nomad-jobname`memory`rss"
- type = "average"
- }
-}
-```
-
-It is possible to import a `circonus_metric_cluster` resource with the following
-command:
-
-```
-$ terraform import circonus_metric_cluster.mymetriccluster ID
-```
-
-Where `ID` is the `_cid` or Circonus ID of the Metric Cluster
-(e.g. `/metric_cluster/12345`) and `circonus_metric_cluster.mymetriccluster` is the
-name of the resource whose state will be populated as a result of the command.
diff --git a/website/source/docs/providers/circonus/r/rule_set.html.markdown b/website/source/docs/providers/circonus/r/rule_set.html.markdown
deleted file mode 100644
index e07bba5fd..000000000
--- a/website/source/docs/providers/circonus/r/rule_set.html.markdown
+++ /dev/null
@@ -1,377 +0,0 @@
----
-layout: "circonus"
-page_title: "Circonus: circonus_rule_set"
-sidebar_current: "docs-circonus-resource-circonus_rule_set"
-description: |-
- Manages a Circonus rule set.
----
-
-# circonus\_rule_set
-
-The ``circonus_rule_set`` resource creates and manages a
-[Circonus Rule Set](https://login.circonus.com/resources/api/calls/rule_set).
-
-## Usage
-
-```hcl
-variable "myapp-tags" {
- type = "list"
- default = [ "app:myapp", "owner:myteam" ]
-}
-
-resource "circonus_rule_set" "myapp-cert-ttl-alert" {
- check = "${circonus_check.myapp-https.checks[0]}"
- metric_name = "cert_end_in"
- link = "https://wiki.example.org/playbook/how-to-renew-cert"
-
- if {
- value {
- min_value = "${2 * 24 * 3600}"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 1
- }
- }
-
- if {
- value {
- min_value = "${7 * 24 * 3600}"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 2
- }
- }
-
- if {
- value {
- min_value = "${21 * 24 * 3600}"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 3
- }
- }
-
- if {
- value {
- absent = "24h"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 1
- }
- }
-
- tags = [ "${var.myapp-tags}" ]
-}
-
-resource "circonus_rule_set" "myapp-healthy-alert" {
- check = "${circonus_check.myapp-https.checks[0]}"
- metric_name = "duration"
- link = "https://wiki.example.org/playbook/debug-down-app"
-
- if {
- value {
- # SEV1 if it takes more than 9.5s for us to complete an HTTP request
- max_value = "${9.5 * 1000}"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 1
- }
- }
-
- if {
- value {
- # SEV2 if it takes more than 5s for us to complete an HTTP request
- max_value = "${5 * 1000}"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 2
- }
- }
-
- if {
- value {
- # SEV3 if the average response time is more than 500ms using a moving
- # average over the last 10min. Any transient problems should have
- # resolved themselves by now. Something's wrong, need to page someone.
- over {
- last = "10m"
- using = "average"
- }
- max_value = "500"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 3
- }
- }
-
- if {
- value {
- # SEV4 if it takes more than 500ms for us to complete an HTTP request. We
- # want to record that things were slow, but not wake anyone up if it
- # momentarily pops above 500ms.
- min_value = "500"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 3
- }
- }
-
- if {
- value {
- # If for whatever reason we're not recording any values for the last
- # 24hrs, fire off a SEV1.
- absent = "24h"
- }
-
- then {
- notify = [ "${circonus_contact_group.myapp-owners.id}" ]
- severity = 1
- }
- }
-
- tags = [ "${var.myapp-tags}" ]
-}
-
-resource "circonus_contact_group" "myapp-owners" {
- name = "My App Owners"
- tags = [ "${var.myapp-tags}" ]
-}
-
-resource "circonus_check" "myapp-https" {
- name = "My App's HTTPS Check"
-
- notes = <<-EOF
-A check to create metric streams for Time to First Byte, HTTP transaction
-duration, and the TTL of a TLS cert.
-EOF
-
- collector {
- id = "/broker/1"
- }
-
- http {
- code = "^200$"
- headers = {
- X-Request-Type = "health-check",
- }
- url = "https://www.example.com/myapp/healthz"
- }
-
- metric {
- name = "${circonus_metric.myapp-cert-ttl.name}"
- tags = "${circonus_metric.myapp-cert-ttl.tags}"
- type = "${circonus_metric.myapp-cert-ttl.type}"
- unit = "${circonus_metric.myapp-cert-ttl.unit}"
- }
-
- metric {
- name = "${circonus_metric.myapp-duration.name}"
- tags = "${circonus_metric.myapp-duration.tags}"
- type = "${circonus_metric.myapp-duration.type}"
- unit = "${circonus_metric.myapp-duration.unit}"
- }
-
- period = 60
- tags = ["source:circonus", "author:terraform"]
- timeout = 10
-}
-
-resource "circonus_metric" "myapp-cert-ttl" {
- name = "cert_end_in"
- type = "numeric"
- unit = "seconds"
- tags = [ "${var.myapp-tags}", "resource:tls" ]
-}
-
-resource "circonus_metric" "myapp-duration" {
- name = "duration"
- type = "numeric"
- unit = "milliseconds"
- tags = [ "${var.myapp-tags}" ]
-}
-```
-
-## Argument Reference
-
-* `check` - (Required) The Circonus ID that this Rule Set will use to search for
- a metric stream to alert on.
-
-* `if` - (Required) One or more ordered predicate clauses that describe when
- Circonus should generate a notification. See below for details on the
- structure of an `if` configuration clause.
-
-* `link` - (Optional) A link to external documentation (or anything else you
- feel is important) when a notification is sent. This value will show up in
- email alerts and the Circonus UI.
-
-* `metric_type` - (Optional) The type of metric this rule set will operate on.
- Valid values are `numeric` (the default) and `text`.
-
-* `notes` - (Optional) Notes about this rule set.
-
-* `parent` - (Optional) A Circonus Metric ID that, if specified and active with
- a severity 1 alert, will silence this rule set until all of the severity 1
- alerts on the parent clear. This value must match the format
- `${check_id}_${metric_name}`.
-
-* `metric_name` - (Required) The name of the metric stream within a given check
- that this rule set is active on.
-
-* `tags` - (Optional) A list of tags assigned to this rule set.
-
-## `if` Configuration
-
-The `if` configuration block is an
-[ordered list of rules](https://login.circonus.com/user/docs/Alerting/Rules/Configure) that
-are evaluated in order, first to last. The first `if` condition to evaluate
-true short-circuits all other `if` blocks in this rule set. An `if` block is also
-referred to as a "rule." It is advised that all high-severity rules are ordered
-before low-severity rules otherwise low-severity rules will mask notifications
-that should be delivered with a high-severity.
-
-`if` blocks are made up of two configuration blocks: `value` and `then`. The
-`value` configuration block specifies the criteria under which the metric streams
-are evaluated. The `then` configuration block, optional, specifies what action
-to take.
-
-### `value` Configuration
-
-A `value` block can have only one of several "predicate" attributes specified
-because they conflict with each other. The list of mutually exclusive
-predicates is dependent on the `metric_type`. To evaluate multiple predicates,
-create multiple `if` configuration blocks in the proper order.
-
-#### `numeric` Predicates
-
-Metric types of type `numeric` support the following predicates. Only one of
-the following predicates may be specified at a time.
-
-* `absent` - (Optional) If a metric has not been observed in this duration the
- rule will fire. When present, this duration is evaluated in terms of seconds.
-
-* `changed` - (Optional) A boolean indicating this rule should fire when the
- value changes (e.g. `n != n1`).
-
-* `min_value` - (Optional) When the value is less than this value, this rule will
- fire (e.g. `n < ${min_value}`).
-
-* `max_value` - (Optional) When the value is greater than this value, this rule
- will fire (e.g. `n > ${max_value}`).
-
-Additionally, a `numeric` check can also evaluate data based on a windowing
-function versus the last measured value in the metric stream. In order to have
-a rule evaluate on derived value from a window, include a nested `over`
-attribute inside of the `value` configuration block. An `over` attribute needs
-two attributes:
-
-* `last` - (Optional) A duration for the sliding window. Default `300s`.
-
-* `using` - (Optional) The window function to use over the `last` interval.
- Valid window functions include: `average` (the default), `stddev`, `derive`,
- `derive_stddev`, `counter`, `counter_stddev`, `derive_2`, `derive_2_stddev`,
- `counter_2`, and `counter_2_stddev`.
-
-#### `text` Predicates
-
-Metric types of type `text` support the following predicates:
-
-* `absent` - (Optional) If a metric has not been observed in this duration the
- rule will fire. When present, this duration is evaluated in terms of seconds.
-
-* `changed` - (Optional) A boolean indicating this rule should fire when the
-  last value in the metric stream changed from its previous value (e.g. `n !=
- n-1`).
-
-* `contains` - (Optional) When the last value in the metric stream contains
-  this configured value, this rule will fire (e.g. `strstr(n, ${contains}) !=
-  NULL`).
-
-* `match` - (Optional) When the last value in the metric stream value exactly
- matches this configured value, this rule will fire (e.g. `strcmp(n, ${match})
- == 0`).
-
-* `not_contain` - (Optional) When the last value in the metric stream does not
-  contain this configured value, this rule will fire (e.g. `strstr(n, ${not_contain})
-  == NULL`).
-
-* `not_match` - (Optional) When the last value in the metric stream does not match
- this configured value, this rule will fire (e.g. `strstr(n, ${not_match}) ==
- NULL`).
-
-### `then` Configuration
-
-A `then` block can have the following attributes:
-
-* `after` - (Optional) Only execute this notification after waiting for this
- number of minutes. Defaults to immediately, or `0m`.
-* `notify` - (Optional) A list of contact group IDs to notify when this rule
-  sends off a notification.
-* `severity` - (Optional) The severity level of the notification. This can be
- set to any value between `1` and `5`. Defaults to `1`.
-
-## Import Example
-
-`circonus_rule_set` supports importing resources. Supposing the following
-Terraform (and that the referenced [`circonus_metric`](metric.html)
-and [`circonus_check`](check.html) have already been imported):
-
-```hcl
-resource "circonus_rule_set" "icmp-latency-alert" {
- check = "${circonus_check.api_latency.checks[0]}"
- metric_name = "maximum"
-
- if {
- value {
- absent = "600s"
- }
-
- then {
- notify = [ "${circonus_contact_group.test-trigger.id}" ]
- severity = 1
- }
- }
-
- if {
- value {
- over {
- last = "120s"
- using = "average"
- }
-
-      max_value = 0.5 # units are in milliseconds
- }
-
- then {
- notify = [ "${circonus_contact_group.test-trigger.id}" ]
- severity = 2
- }
- }
-}
-```
-
-It is possible to import a `circonus_rule_set` resource with the following command:
-
-```
-$ terraform import circonus_rule_set.icmp-latency-alert ID
-```
-
-Where `ID` is the `_cid` or Circonus ID of the Rule Set
-(e.g. `/rule_set/201285_maximum`) and `circonus_rule_set.icmp-latency-alert` is
-the name of the resource whose state will be populated as a result of the
-command.
diff --git a/website/source/docs/providers/clc/index.html.markdown b/website/source/docs/providers/clc/index.html.markdown
deleted file mode 100644
index 03d01aab1..000000000
--- a/website/source/docs/providers/clc/index.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "clc"
-page_title: "Provider: CenturyLinkCloud"
-sidebar_current: "docs-clc-index"
-description: |-
- The CenturyLinkCloud provider is used to interact with the many resources
- supported by CLC. The provider needs to be configured with account
- credentials before it can be used.
----
-
-# CLC Provider
-
-The clc provider is used to interact with the many resources supported
-by CenturyLinkCloud. The provider needs to be configured with account
-credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-For additional documentation, see the [CLC Developer Center](https://www.ctl.io/developers/)
-
-## Example Usage
-
-```hcl
-# Configure the CLC Provider
-provider "clc" {
- username = "${var.clc_username}"
- password = "${var.clc_password}"
- account = "${var.clc_account}" # optional
-}
-
-# Create a server
-resource "clc_server" "node" {
- # ...
-}
-```
-
-
-## Account Bootstrap
-
-Trial accounts are available by signing up on the control portal [https://control.ctl.io](https://control.ctl.io).
-
-For new accounts, you should initially run these steps manually:
-
-- [Create a network.](https://control.ctl.io/Network/network)
-- [Provision a server.](https://control.ctl.io/create)
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `clc_username` - (Required) This is the CLC account username. It must be provided, but
- it can also be sourced from the `CLC_USERNAME` environment variable.
-
-* `clc_password` - (Required) This is the CLC account password. It must be provided, but
- it can also be sourced from the `CLC_PASSWORD` environment variable.
-
-* `clc_account` - (Optional) Override CLC account alias. Also taken from the `CLC_ACCOUNT`
- environment variable if provided.
diff --git a/website/source/docs/providers/clc/r/group.html.markdown b/website/source/docs/providers/clc/r/group.html.markdown
deleted file mode 100644
index b942f073f..000000000
--- a/website/source/docs/providers/clc/r/group.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "clc"
-page_title: "clc: clc_group"
-sidebar_current: "docs-clc-resource-group"
-description: |-
- Manages a CLC server group.
----
-
-# clc_group
-
-Manages a CLC server group. Either provisions or resolves to an existing group.
-
-See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#groups).
-
-## Example Usage
-
-```hcl
-# Provision/Resolve a server group
-resource "clc_group" "frontends" {
- location_id = "WA1"
- name = "frontends"
- parent = "Default Group"
-}
-
-output "group_id" {
- value = "clc_group.frontends.id"
-}
-```
-
-
-## Argument Reference
-
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name (or GUID) of this server group. Will resolve to existing if present.
-* `parent` - (Required, string) The name or ID of the parent group. Will error if absent or unable to resolve.
-* `location_id` - (Required, string) The datacenter location of both parent group and this group.
- Examples: "WA1", "VA1"
-* `description` - (Optional, string) Description for server group (visible in control portal only)
-* `custom_fields` - (Optional) See [CustomFields](#custom_fields) below for details.
-
-
-
-
-## CustomFields
-
-`custom_fields` is a block within the configuration that may be
-repeated to bind custom fields for a server. CustomFields need to be set
-up in advance. Each `custom_fields` block supports the following:
-
-* `id` - (Required, string) The ID of the custom field to set.
-* `value` - (Required, string) The value for the specified field.
diff --git a/website/source/docs/providers/clc/r/load_balancer.html.markdown b/website/source/docs/providers/clc/r/load_balancer.html.markdown
deleted file mode 100644
index f065d5afe..000000000
--- a/website/source/docs/providers/clc/r/load_balancer.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "clc"
-page_title: "clc: clc_load_balancer"
-sidebar_current: "docs-clc-resource-load-balancer"
-description: |-
-  Manages a CLC load balancer.
----
-
-# clc_load_balancer
-
-Manages a CLC load balancer. Manage connected backends with [clc_load_balancer_pool](load_balancer_pool.html)
-
-See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#shared-load-balancer).
-
-## Example Usage
-
-```hcl
-# Provision a load balancer
-resource "clc_load_balancer" "api" {
- data_center = "${clc_group.frontends.location_id}"
- name = "api"
- description = "api load balancer"
- status = "enabled"
-}
-
-output "api_ip" {
- value = "clc_load_balancer.api.ip_address"
-}
-```
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the load balancer.
-* `data_center` - (Required, string) The datacenter location of both parent group and this group.
-* `status` - (Required, string) Either "enabled" or "disabled"
-* `description` - (Optional, string) Description for server group (visible in control portal only)
diff --git a/website/source/docs/providers/clc/r/load_balancer_pool.html.markdown b/website/source/docs/providers/clc/r/load_balancer_pool.html.markdown
deleted file mode 100644
index 6ffe45e60..000000000
--- a/website/source/docs/providers/clc/r/load_balancer_pool.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "clc"
-page_title: "clc: clc_load_balancer_pool"
-sidebar_current: "docs-clc-resource-load-balancer-pool"
-description: |-
- Manages a CLC load balancer pool.
----
-
-# clc_load_balancer_pool
-
-Manages a CLC load balancer pool. Manage related frontend with [clc_load_balancer](load_balancer.html)
-
-See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#shared-load-balancer).
-
-## Example Usage
-
-
-```hcl
-# Provision a load balancer pool
-resource "clc_load_balancer_pool" "pool" {
- data_center = "${clc_group.frontends.location_id}"
- load_balancer = "${clc_load_balancer.api.id}"
- method = "roundRobin"
- persistence = "standard"
- port = 80
-
- nodes {
- status = "enabled"
- ipAddress = "${clc_server.node.0.private_ip_address}"
- privatePort = 3000
- }
-
- nodes {
- status = "enabled"
- ipAddress = "${clc_server.node.1.private_ip_address}"
- privatePort = 3000
- }
-}
-
-output "pool" {
- value = "${join(" ", clc_load_balancer.pool.nodes)}"
-}
-```
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `load_balancer` - (Required, string) The id of the load balancer.
-* `data_center` - (Required, string) The datacenter location for this pool.
-* `port` - (Required, int) Either 80 or 443
-* `method` - (Optional, string) The configured balancing method. Either
- "roundRobin" (default) or "leastConnection".
-* `persistence` - (Optional, string) The configured persistence
- method. Either "standard" (default) or "sticky".
-* nodes - (Optional) See [Nodes](#nodes) below for details.
-
-
-
-## Nodes
-
-
-`nodes` is a block within the configuration that may be repeated to
-specify connected nodes on this pool. Each `nodes` block supports the
-following:
-
-* `ipAddress` (Required, string) The destination internal ip of pool node.
-* `privatePort` (Required, int) The destination port on the pool node.
-* `status` (Optional, string) Either "enabled" or "disabled".
diff --git a/website/source/docs/providers/clc/r/public_ip.html.markdown b/website/source/docs/providers/clc/r/public_ip.html.markdown
deleted file mode 100644
index 18256c5ef..000000000
--- a/website/source/docs/providers/clc/r/public_ip.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "clc"
-page_title: "clc: clc_public_ip"
-sidebar_current: "docs-clc-resource-public-ip"
-description: |-
- Manages a CLC public ip.
----
-
-# clc_public_ip
-
-Manages a CLC public ip (for an existing server).
-
-See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#public-ip).
-
-## Example Usage
-
-```hcl
-# Provision a public ip
-resource "clc_public_ip" "backdoor" {
- server_id = "${clc_server.node.0.id}"
- internal_ip_address = "${clc_server.node.0.private_ip_address}"
-
- ports {
- protocol = "ICMP"
- port = -1
- }
-
- ports {
- protocol = "TCP"
- port = 22
- }
-
- ports {
- protocol = "TCP"
- port = 2000
- port_to = 9000
- }
-
- source_restrictions {
- cidr = "85.39.22.15/30"
- }
-}
-
-output "ip" {
- value = "clc_public_ip.backdoor.id"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `server_id` - (Required, string) The name or ID of the server to bind IP to.
-* `internal_ip_address` - (Optional, string) The internal IP of the
- NIC to attach to. If not provided, a new internal NIC will be
- provisioned and used.
-* `ports` - (Optional) See [Ports](#ports) below for details.
-* `source_restrictions` - (Optional) See
- [SourceRestrictions](#source_restrictions) below for details.
-
-
-
-## Ports
-
-`ports` is a block within the configuration that may be
-repeated to specify open ports on the target IP. Each
-`ports` block supports the following:
-
-* `protocol` (Required, string) One of "tcp", "udp", "icmp".
-* `port` (Required, int) The port to open. If defining a range, demarks starting port
-* `portTo` (Optional, int) Given a port range, demarks the ending port.
-
-
-
-## SourceRestrictions
-
-`source_restrictions` is a block within the configuration that may be
-repeated to restrict ingress traffic on specified CIDR blocks. Each
-`source_restrictions` block supports the following:
-
-* `cidr` (Required, string) The IP or range of IPs in CIDR notation.
diff --git a/website/source/docs/providers/clc/r/server.html.markdown b/website/source/docs/providers/clc/r/server.html.markdown
deleted file mode 100644
index 6d710ea39..000000000
--- a/website/source/docs/providers/clc/r/server.html.markdown
+++ /dev/null
@@ -1,174 +0,0 @@
----
-layout: "clc"
-page_title: "clc: clc_server"
-sidebar_current: "docs-clc-resource-server"
-description: |-
- Manages the lifecycle of a CLC server.
----
-
-# clc_server
-
-Manages a CLC server.
-
-Resources and Documentation:
-
-- [Datacenter / Capability Map](https://www.ctl.io/data-centers/)
-- [Hyperscale](https://www.ctl.io/hyperscale/) and [Bare Metal](https://www.ctl.io/bare-metal/) Servers
-- [REST API](https://www.ctl.io/api-docs/v2/#servers-create-server)
-
-
-## Example Usage
-
-```hcl
-# Provision a server
-resource "clc_server" "node" {
- name_template = "trusty"
- source_server_id = "UBUNTU-14-64-TEMPLATE"
- group_id = "${clc_group.frontends.id}"
- cpu = 2
- memory_mb = 2048
- password = "Green123$"
-
- additional_disks {
- path = "/var"
- size_gb = 100
- type = "partitioned"
- }
-
- additional_disks {
- size_gb = 10
- type = "raw"
- }
-}
-
-output "server_id" {
- value = "clc_server.node.id"
-}
-```
-
-## Argument Reference
-
-
-The following arguments are supported:
-
-* `name_template` - (Required, string) The basename of the server. A unique name will be generated by the platform.
-* `source_server_id` - (Required, string) The name or ID of the base OS image.
- Examples: "ubuntu-14-64-template", "rhel-7-64-template", "win2012r2dtc-64"
-* `group_id` - (Required, string) The name or ID of the server group to spawn server into.
-* `cpu` - (Required, int) The number of virtual cores
-* `memory_mb` - (Required, int) Provisioned RAM
-* `type` - (Required, string) The virtualization type
- One of "standard", "hyperscale", "bareMetal"
-* `password` - (Optional, string) The root/administrator password. Will be generated by platform if not provided.
-* `description` - (Optional, string) Description for server (visible in control portal only)
-* `power_state` - (Optional, string) See [PowerStates](#power_states) below for details.
- If absent, defaults to `started`.
-* `private_ip_address` - (Optional, string) Set internal IP address. If absent, allocated and assigned from pool.
-* `network_id` - (Optional, string) GUID of network to use. (Must be set up in advance from control portal.)
- When absent, the default network will be used.
-* `storage_type` - (Optional, string) Backup and replication strategy for disks.
- One of "standard", "premium"
-* `aa_policy_id` - (Optional, string | hyperscale) Anti-Affinity policy ID
-* `configuration_id` - (Optional, string | bareMetal) Hardware configuration ID
-* `os_type` - (Optional, string | bareMetal) Operating system to install.
-* `additional_disks` - (Optional) See [Disks](#disks) below for details.
-* `custom_fields` - (Optional) See [CustomFields](#custom_fields) below for details.
-* `metadata` - (Optional) Misc state storage for non-CLC metadata.
-
-
-
-## Server Types
-
-#### standard
-
-Cloud servers `standard` offer basic, commodity level
-performance with mixed spindle/SSD storage profiles. Additional
-features storage backups, snapshot/clone/archive, and widespread
-availability.
-
-
-#### hyperscale
-
-Hyperscale `hyperscale` servers offer significantly higher IOPS than standard
-servers for CPU and IO intensive servers. See the
-[FAQ](https://www.ctl.io/knowledge-base/servers/hyperscale-server-faq/)
-for more details.
-
-Physical host redundancy can be managed via
-[Anti-Affinity policies](https://www.ctl.io/knowledge-base/servers/centurylink-cloud-anti-affinity-policies/).
-
-#### bareMetal
-
-Bare metal `bareMetal` offers optimal compute performance and is
-available in select datacenters in CLC for approved customers. For
-more info see the
-[FAQ](https://www.ctl.io/knowledge-base/servers/bare-metal-faq/).
-
-For `bareMetal`, the required fields `source_server_id`, `cpu`, and
-`memory_mb` are ignored and instead the following fields are required:
-
-- configuration_id
-- os_type
-
-Values for `configuration_id` and `os_type` are specific to each
-datacenter and are available via the API endpoints
-[here](https://www.ctl.io/api-docs/v2/#data-centers-get-data-center-bare-metal-capabilities).
-
-
-
-
-
-## PowerStates
-
-`power_state` may be used to set initial power state or modify existing instances.
-
-* `on` | `started` - machine powered on
-* `off` | `stopped` - machine powered off forcefully
-* `paused` - freeze machine: memory, processes, billing, monitoring.
-* `shutdown` - shutdown gracefully
-* `reboot` - restart gracefully
-* `reset` - restart forcefully
-
-
-## Disks
-
-`additional_disks` is a block within the configuration that may be
-repeated to specify the attached disks on a server. Each
-`additional_disks` block supports the following:
-
-* `type` - (Required, string) Either "raw" or "partitioned".
-* `size_gb` - (Required, int) Size of allocated disk.
-* `path` - (Required, string, type:`partitioned`) The mountpoint for the disk.
-
-
-
-## CustomFields
-
-`custom_fields` is a block within the configuration that may be
-repeated to bind custom fields for a server. CustomFields need to be set
-up in advance. Each `custom_fields` block supports the following:
-
-* `id` - (Required, string) The ID of the custom field to set.
-* `value` - (Required, string) The value for the specified field.
-
-
-## Packages
-
-`packages` is a block within the configuration that may be repeated to
-specify packages and their associated parameters to be run at
-instantiation. Packages facilitate various tasks like ssh key
-installation, kernel upgrades, etc. Package ID as well as parameters
-are configured via this block.
-
-Example:
-
-```hcl
-# Configure the CLC Provider
-provider "clc_server" "ubuntu" {
- # ...
- packages {
- id = "77abb844-579d-478d-3955-c69ab4a7ba1a"
- SshKey = "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAA..."
- }
-}
-```
diff --git a/website/source/docs/providers/cloudflare/index.html.markdown b/website/source/docs/providers/cloudflare/index.html.markdown
deleted file mode 100644
index bd988b744..000000000
--- a/website/source/docs/providers/cloudflare/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "cloudflare"
-page_title: "Provider: Cloudflare"
-sidebar_current: "docs-cloudflare-index"
-description: |-
- The Cloudflare provider is used to interact with the DNS resources supported by Cloudflare. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Cloudflare Provider
-
-The Cloudflare provider is used to interact with the
-DNS resources supported by Cloudflare. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Cloudflare provider
-provider "cloudflare" {
- email = "${var.cloudflare_email}"
- token = "${var.cloudflare_token}"
-}
-
-# Create a record
-resource "cloudflare_record" "www" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `email` - (Required) The email associated with the account. This can also be
- specified with the `CLOUDFLARE_EMAIL` shell environment variable.
-* `token` - (Required) The Cloudflare API token. This can also be specified
- with the `CLOUDFLARE_TOKEN` shell environment variable.
diff --git a/website/source/docs/providers/cloudflare/r/record.html.markdown b/website/source/docs/providers/cloudflare/r/record.html.markdown
deleted file mode 100644
index 95f36ef95..000000000
--- a/website/source/docs/providers/cloudflare/r/record.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "cloudflare"
-page_title: "Cloudflare: cloudflare_record"
-sidebar_current: "docs-cloudflare-resource-record"
-description: |-
- Provides a Cloudflare record resource.
----
-
-# cloudflare_record
-
-Provides a Cloudflare record resource.
-
-## Example Usage
-
-```hcl
-# Add a record to the domain
-resource "cloudflare_record" "foobar" {
- domain = "${var.cloudflare_domain}"
- name = "terraform"
- value = "192.168.0.11"
- type = "A"
- ttl = 3600
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-* `value` - (Required) The value of the record
-* `type` - (Required) The type of the record
-* `ttl` - (Optional) The TTL of the record
-* `priority` - (Optional) The priority of the record
-* `proxied` - (Optional) Whether the record gets Cloudflare's origin protection.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `name` - The name of the record
-* `value` - The value of the record
-* `type` - The type of the record
-* `ttl` - The TTL of the record
-* `priority` - The priority of the record
-* `hostname` - The FQDN of the record
-* `proxied` - (Optional) Whether the record gets Cloudflare's origin protection.
diff --git a/website/source/docs/providers/cloudstack/index.html.markdown b/website/source/docs/providers/cloudstack/index.html.markdown
deleted file mode 100644
index 8db82a0fa..000000000
--- a/website/source/docs/providers/cloudstack/index.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "cloudstack"
-page_title: "Provider: CloudStack"
-sidebar_current: "docs-cloudstack-index"
-description: |-
- The CloudStack provider is used to interact with the many resources supported by CloudStack. The provider needs to be configured with a URL pointing to a running CloudStack API and the proper credentials before it can be used.
----
-
-# CloudStack Provider
-
-The CloudStack provider is used to interact with the many resources
-supported by CloudStack. The provider needs to be configured with a
-URL pointing to a running CloudStack API and the proper credentials
-before it can be used.
-
-In order to provide the required configuration options you can either
-supply values for the `api_url`, `api_key` and `secret_key` fields, or
-for the `config` and `profile` fields. A combination of both is not
-allowed and will not work.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the CloudStack Provider
-provider "cloudstack" {
- api_url = "${var.cloudstack_api_url}"
- api_key = "${var.cloudstack_api_key}"
- secret_key = "${var.cloudstack_secret_key}"
-}
-
-# Create a web server
-resource "cloudstack_instance" "web" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_url` - (Optional) This is the CloudStack API URL. It can also be sourced
- from the `CLOUDSTACK_API_URL` environment variable.
-
-* `api_key` - (Optional) This is the CloudStack API key. It can also be sourced
- from the `CLOUDSTACK_API_KEY` environment variable.
-
-* `secret_key` - (Optional) This is the CloudStack secret key. It can also be
- sourced from the `CLOUDSTACK_SECRET_KEY` environment variable.
-
-* `config` - (Optional) The path to a `CloudMonkey` config file. If set the API
- URL, key and secret will be retrieved from this file.
-
-* `profile` - (Optional) Used together with the `config` option. Specifies which
- `CloudMonkey` profile in the config file to use.
-
-* `http_get_only` - (Optional) Some cloud providers only allow HTTP GET calls to
- their CloudStack API. If using such a provider, you need to set this to `true`
- in order for the provider to only make GET calls and no POST calls. It can also
- be sourced from the `CLOUDSTACK_HTTP_GET_ONLY` environment variable.
-
-* `timeout` - (Optional) A value in seconds. This is the time allowed for Cloudstack
- to complete each asynchronous job triggered. If unset, this can be sourced from the
- `CLOUDSTACK_TIMEOUT` environment variable. Otherwise, this will default to 300
- seconds.
diff --git a/website/source/docs/providers/cloudstack/r/affinity_group.html.markdown b/website/source/docs/providers/cloudstack/r/affinity_group.html.markdown
deleted file mode 100644
index 1c687e0a3..000000000
--- a/website/source/docs/providers/cloudstack/r/affinity_group.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_affinity_group"
-sidebar_current: "docs-cloudstack-resource-affinity-group"
-description: |-
- Creates an affinity group.
----
-
-# cloudstack_affinity_group
-
-Creates an affinity group.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_affinity_group" "default" {
- name = "test-affinity-group"
- type = "host anti-affinity"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the affinity group. Changing this
- forces a new resource to be created.
-
-* `description` - (Optional) The description of the affinity group.
-
-* `type` - (Required) The affinity group type. Changing this
- forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to register this
- affinity group to. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the affinity group.
-* `description` - The description of the affinity group.
diff --git a/website/source/docs/providers/cloudstack/r/disk.html.markdown b/website/source/docs/providers/cloudstack/r/disk.html.markdown
deleted file mode 100644
index 7e48a1d6f..000000000
--- a/website/source/docs/providers/cloudstack/r/disk.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_disk"
-sidebar_current: "docs-cloudstack-resource-disk"
-description: |-
- Creates a disk volume from a disk offering. This disk volume will be attached to a virtual machine if the optional parameters are configured.
----
-
-# cloudstack_disk
-
-Creates a disk volume from a disk offering. This disk volume will be attached to
-a virtual machine if the optional parameters are configured.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_disk" "default" {
- name = "test-disk"
- attach = "true"
- disk_offering = "custom"
- size = 50
- virtual_machine_id = "server-1"
- zone = "zone-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the disk volume. Changing this forces a new
- resource to be created.
-
-* `attach` - (Optional) Determines whether or not to attach the disk volume to a
- virtual machine (defaults false).
-
-* `device_id` - (Optional) The device ID to map the disk volume to within the guest OS.
-
-* `disk_offering` - (Required) The name or ID of the disk offering to use for
- this disk volume.
-
-* `size` - (Optional) The size of the disk volume in gigabytes.
-
-* `shrink_ok` - (Optional) Verifies if the disk volume is allowed to shrink when
- resizing (defaults false).
-
-* `virtual_machine_id` - (Optional) The ID of the virtual machine to which you want
- to attach the disk volume.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `zone` - (Required) The name or ID of the zone where this disk volume will be available.
- Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the disk volume.
-* `device_id` - The device ID the disk volume is mapped to within the guest OS.
diff --git a/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown b/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown
deleted file mode 100644
index 10badd171..000000000
--- a/website/source/docs/providers/cloudstack/r/egress_firewall.html.markdown
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_egress_firewall"
-sidebar_current: "docs-cloudstack-resource-egress-firewall"
-description: |-
- Creates egress firewall rules for a given network.
----
-
-# cloudstack_egress_firewall
-
-Creates egress firewall rules for a given network.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_egress_firewall" "default" {
- network_id = "6eb22f91-7454-4107-89f4-36afcdf33021"
-
- rule {
- cidr_list = ["10.0.0.0/8"]
- protocol = "tcp"
- ports = ["80", "1000-2000"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `network_id` - (Required) The network ID for which to create the egress
- firewall rules. Changing this forces a new resource to be created.
-
-* `managed` - (Optional) USE WITH CAUTION! If enabled all the egress firewall
- rules for this network will be managed by this resource. This means it will
- delete all firewall rules that are not in your config! (defaults false)
-
-* `rule` - (Optional) Can be specified multiple times. Each rule block supports
- fields documented below. If `managed = false` at least one rule is required!
-
-* `parallelism` (Optional) Specifies how many rules will be created or deleted
- concurrently. (defaults 2)
-
-The `rule` block supports:
-
-* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
-
-* `protocol` - (Required) The name of the protocol to allow. Valid options are:
- `tcp`, `udp` and `icmp`.
-
-* `icmp_type` - (Optional) The ICMP type to allow. This can only be specified if
- the protocol is ICMP.
-
-* `icmp_code` - (Optional) The ICMP code to allow. This can only be specified if
- the protocol is ICMP.
-
-* `ports` - (Optional) List of ports and/or port ranges to allow. This can only
- be specified if the protocol is TCP or UDP.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The network ID for which the egress firewall rules are created.
diff --git a/website/source/docs/providers/cloudstack/r/firewall.html.markdown b/website/source/docs/providers/cloudstack/r/firewall.html.markdown
deleted file mode 100644
index d3c82fe2d..000000000
--- a/website/source/docs/providers/cloudstack/r/firewall.html.markdown
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_firewall"
-sidebar_current: "docs-cloudstack-resource-firewall"
-description: |-
- Creates firewall rules for a given IP address.
----
-
-# cloudstack_firewall
-
-Creates firewall rules for a given IP address.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_firewall" "default" {
- ip_address_id = "30b21801-d4b3-4174-852b-0c0f30bdbbfb"
-
- rule {
- cidr_list = ["10.0.0.0/8"]
- protocol = "tcp"
- ports = ["80", "1000-2000"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `ip_address_id` - (Required) The IP address ID for which to create the
- firewall rules. Changing this forces a new resource to be created.
-
-* `managed` - (Optional) USE WITH CAUTION! If enabled all the firewall rules for
- this IP address will be managed by this resource. This means it will delete
- all firewall rules that are not in your config! (defaults false)
-
-* `rule` - (Optional) Can be specified multiple times. Each rule block supports
- fields documented below. If `managed = false` at least one rule is required!
-
-* `parallelism` (Optional) Specifies how many rules will be created or deleted
- concurrently. (defaults 2)
-
-The `rule` block supports:
-
-* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
-
-* `protocol` - (Required) The name of the protocol to allow. Valid options are:
- `tcp`, `udp` and `icmp`.
-
-* `icmp_type` - (Optional) The ICMP type to allow. This can only be specified if
- the protocol is ICMP.
-
-* `icmp_code` - (Optional) The ICMP code to allow. This can only be specified if
- the protocol is ICMP.
-
-* `ports` - (Optional) List of ports and/or port ranges to allow. This can only
- be specified if the protocol is TCP or UDP.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The IP address ID for which the firewall rules are created.
diff --git a/website/source/docs/providers/cloudstack/r/instance.html.markdown b/website/source/docs/providers/cloudstack/r/instance.html.markdown
deleted file mode 100644
index ffcb037e4..000000000
--- a/website/source/docs/providers/cloudstack/r/instance.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_instance"
-sidebar_current: "docs-cloudstack-resource-instance"
-description: |-
- Creates and automatically starts a virtual machine based on a service offering, disk offering, and template.
----
-
-# cloudstack_instance
-
-Creates and automatically starts a virtual machine based on a service offering,
-disk offering, and template.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_instance" "web" {
- name = "server-1"
- service_offering = "small"
- network_id = "6eb22f91-7454-4107-89f4-36afcdf33021"
- template = "CentOS 6.5"
- zone = "zone-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance.
-
-* `display_name` - (Optional) The display name of the instance.
-
-* `service_offering` - (Required) The name or ID of the service offering used
- for this instance.
-
-* `network_id` - (Optional) The ID of the network to connect this instance
- to. Changing this forces a new resource to be created.
-
-* `ip_address` - (Optional) The IP address to assign to this instance. Changing
- this forces a new resource to be created.
-
-* `template` - (Required) The name or ID of the template used for this
- instance. Changing this forces a new resource to be created.
-
-* `root_disk_size` - (Optional) The size of the root disk in gigabytes. The
- root disk is resized on deploy. Only applies to template-based deployments.
- Changing this forces a new resource to be created.
-
-* `group` - (Optional) The group name of the instance.
-
-* `affinity_group_ids` - (Optional) List of affinity group IDs to apply to this
- instance.
-
-* `affinity_group_names` - (Optional) List of affinity group names to apply to
- this instance.
-
-* `security_group_ids` - (Optional) List of security group IDs to apply to this
- instance. Changing this forces a new resource to be created.
-
-* `security_group_names` - (Optional) List of security group names to apply to
- this instance. Changing this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `zone` - (Required) The name or ID of the zone where this instance will be
- created. Changing this forces a new resource to be created.
-
-* `user_data` - (Optional) The user data to provide when launching the
- instance.
-
-* `keypair` - (Optional) The name of the SSH key pair that will be used to
- access this instance.
-
-* `expunge` - (Optional) This determines if the instance is expunged when it is
- destroyed (defaults false)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The instance ID.
-* `display_name` - The display name of the instance.
diff --git a/website/source/docs/providers/cloudstack/r/ipaddress.html.markdown b/website/source/docs/providers/cloudstack/r/ipaddress.html.markdown
deleted file mode 100644
index baa8ab4f9..000000000
--- a/website/source/docs/providers/cloudstack/r/ipaddress.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_ipaddress"
-sidebar_current: "docs-cloudstack-resource-ipaddress"
-description: |-
- Acquires and associates a public IP.
----
-
-# cloudstack_ipaddress
-
-Acquires and associates a public IP.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_ipaddress" "default" {
- network_id = "6eb22f91-7454-4107-89f4-36afcdf33021"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `network_id` - (Optional) The ID of the network for which an IP address should
- be acquired and associated. Changing this forces a new resource to be created.
-
-* `vpc_id` - (Optional) The ID of the VPC for which an IP address should be
- acquired and associated. Changing this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-*NOTE: Either `network_id` or `vpc_id` should have a value!*
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the acquired and associated IP address.
-* `ip_address` - The IP address that was acquired and associated.
diff --git a/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown b/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown
deleted file mode 100644
index 3ad957381..000000000
--- a/website/source/docs/providers/cloudstack/r/loadbalancer_rule.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_loadbalancer_rule"
-sidebar_current: "docs-cloudstack-resource-loadbalancer-rule"
-description: |-
- Creates a load balancer rule.
----
-
-# cloudstack_loadbalancer_rule
-
-Creates a loadbalancer rule.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_loadbalancer_rule" "default" {
- name = "loadbalancer-rule-1"
- description = "Loadbalancer rule 1"
- ip_address_id = "30b21801-d4b3-4174-852b-0c0f30bdbbfb"
- algorithm = "roundrobin"
- private_port = 80
- public_port = 80
- member_ids = ["f8141e2f-4e7e-4c63-9362-986c908b7ea7"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Name of the loadbalancer rule.
- Changing this forces a new resource to be created.
-
-* `description` - (Optional) The description of the load balancer rule.
-
-* `ip_address_id` - (Required) Public IP address ID from where the network
- traffic will be load balanced from. Changing this forces a new resource
- to be created.
-
-* `network_id` - (Optional) The network ID this rule will be created for.
- Required when public IP address is not associated with any network yet
- (VPC case).
-
-* `algorithm` - (Required) Load balancer rule algorithm (source, roundrobin,
- leastconn). Changing this forces a new resource to be created.
-
-* `private_port` - (Required) The private port of the private IP address
- (virtual machine) where the network traffic will be load balanced to.
- Changing this forces a new resource to be created.
-
-* `public_port` - (Required) The public port from where the network traffic
- will be load balanced from. Changing this forces a new resource to be
- created.
-
-* `member_ids` - (Required) List of instance IDs to assign to the load balancer
- rule. Changing this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The load balancer rule ID.
-* `description` - The description of the load balancer rule.
diff --git a/website/source/docs/providers/cloudstack/r/network.html.markdown b/website/source/docs/providers/cloudstack/r/network.html.markdown
deleted file mode 100644
index 354f4f561..000000000
--- a/website/source/docs/providers/cloudstack/r/network.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_network"
-sidebar_current: "docs-cloudstack-resource-network"
-description: |-
- Creates a network.
----
-
-# cloudstack_network
-
-Creates a network.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_network" "default" {
- name = "test-network"
- cidr = "10.0.0.0/16"
- network_offering = "Default Network"
- zone = "zone-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the network.
-
-* `display_text` - (Optional) The display text of the network.
-
-* `cidr` - (Required) The CIDR block for the network. Changing this forces a new
- resource to be created.
-
-* `gateway` - (Optional) Gateway that will be provided to the instances in this
- network. Defaults to the first usable IP in the range.
-
-* `startip` - (Optional) Start of the IP block that will be available on the
- network. Defaults to the second available IP in the range.
-
-* `endip` - (Optional) End of the IP block that will be available on the
- network. Defaults to the last available IP in the range.
-
-* `network_domain` - (Optional) DNS domain for the network.
-
-* `network_offering` - (Required) The name or ID of the network offering to use
- for this network.
-
-* `vlan` - (Optional) The VLAN number (1-4095) the network will use. This might be
- required by the Network Offering if specifyVlan=true is set. Only the ROOT
- admin can set this value.
-
-* `vpc_id` - (Optional) The VPC ID in which to create this network. Changing
- this forces a new resource to be created.
-
-* `acl_id` - (Optional) The ACL ID that should be attached to the network or
- `none` if you do not want to attach an ACL. You can dynamically attach and
- swap ACL's, but if you want to detach an attached ACL and revert to using
- `none`, this will force a new resource to be created. (defaults `none`)
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `zone` - (Required) The name or ID of the zone where this network will be
- available. Changing this forces a new resource to be created.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the network.
-* `display_text` - The display text of the network.
-* `network_domain` - DNS domain for the network.
diff --git a/website/source/docs/providers/cloudstack/r/network_acl.html.markdown b/website/source/docs/providers/cloudstack/r/network_acl.html.markdown
deleted file mode 100644
index 6cea1befa..000000000
--- a/website/source/docs/providers/cloudstack/r/network_acl.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_network_acl"
-sidebar_current: "docs-cloudstack-resource-network-acl"
-description: |-
- Creates a Network ACL for the given VPC.
----
-
-# cloudstack_network_acl
-
-Creates a Network ACL for the given VPC.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_network_acl" "default" {
- name = "test-acl"
- vpc_id = "76f6e8dc-07e3-4971-b2a2-8831b0cc4cb4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ACL. Changing this forces a new resource
- to be created.
-
-* `description` - (Optional) The description of the ACL. Changing this forces a
- new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `vpc_id` - (Required) The ID of the VPC to create this ACL for. Changing this
- forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Network ACL
diff --git a/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown b/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown
deleted file mode 100644
index 07a2b3929..000000000
--- a/website/source/docs/providers/cloudstack/r/network_acl_rule.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_network_acl_rule"
-sidebar_current: "docs-cloudstack-resource-network-acl-rule"
-description: |-
- Creates network ACL rules for a given network ACL.
----
-
-# cloudstack_network_acl_rule
-
-Creates network ACL rules for a given network ACL.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_network_acl_rule" "default" {
- acl_id = "f3843ce0-334c-4586-bbd3-0c2e2bc946c6"
-
- rule {
- action = "allow"
- cidr_list = ["10.0.0.0/8"]
- protocol = "tcp"
- ports = ["80", "1000-2000"]
- traffic_type = "ingress"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `acl_id` - (Required) The network ACL ID for which to create the rules.
- Changing this forces a new resource to be created.
-
-* `managed` - (Optional) USE WITH CAUTION! If enabled all the firewall rules for
- this network ACL will be managed by this resource. This means it will delete
- all firewall rules that are not in your config! (defaults false)
-
-* `rule` - (Optional) Can be specified multiple times. Each rule block supports
- fields documented below. If `managed = false` at least one rule is required!
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `parallelism` (Optional) Specifies how many rules will be created or deleted
- concurrently. (defaults 2)
-
-The `rule` block supports:
-
-* `action` - (Optional) The action for the rule. Valid options are: `allow` and
- `deny` (defaults allow).
-
-* `cidr_list` - (Required) A CIDR list to allow access to the given ports.
-
-* `protocol` - (Required) The name of the protocol to allow. Valid options are:
- `tcp`, `udp`, `icmp`, `all` or a valid protocol number.
-
-* `icmp_type` - (Optional) The ICMP type to allow, or `-1` to allow `any`. This
- can only be specified if the protocol is ICMP. (defaults 0)
-
-* `icmp_code` - (Optional) The ICMP code to allow, or `-1` to allow `any`. This
- can only be specified if the protocol is ICMP. (defaults 0)
-
-* `ports` - (Optional) List of ports and/or port ranges to allow. This can only
- be specified if the protocol is TCP, UDP, ALL or a valid protocol number.
-
-* `traffic_type` - (Optional) The traffic type for the rule. Valid options are:
- `ingress` or `egress` (defaults ingress).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ACL ID for which the rules are created.
diff --git a/website/source/docs/providers/cloudstack/r/nic.html.markdown b/website/source/docs/providers/cloudstack/r/nic.html.markdown
deleted file mode 100644
index 8254b6843..000000000
--- a/website/source/docs/providers/cloudstack/r/nic.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_nic"
-sidebar_current: "docs-cloudstack-resource-nic"
-description: |-
- Creates an additional NIC to add a VM to the specified network.
----
-
-# cloudstack_nic
-
-Creates an additional NIC to add a VM to the specified network.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_nic" "test" {
- network_id = "6eb22f91-7454-4107-89f4-36afcdf33021"
- ip_address = "192.168.1.1"
- virtual_machine_id = "f8141e2f-4e7e-4c63-9362-986c908b7ea7"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `network_id` - (Required) The ID of the network to plug the NIC into. Changing
- this forces a new resource to be created.
-
-* `ip_address` - (Optional) The IP address to assign to the NIC. Changing this
- forces a new resource to be created.
-
-* `virtual_machine_id` - (Required) The ID of the virtual machine to which to
- attach the NIC. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the NIC.
-* `ip_address` - The assigned IP address.
diff --git a/website/source/docs/providers/cloudstack/r/port_forward.html.markdown b/website/source/docs/providers/cloudstack/r/port_forward.html.markdown
deleted file mode 100644
index 9b747c58c..000000000
--- a/website/source/docs/providers/cloudstack/r/port_forward.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_port_forward"
-sidebar_current: "docs-cloudstack-resource-port-forward"
-description: |-
- Creates port forwards.
----
-
-# cloudstack_port_forward
-
-Creates port forwards.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_port_forward" "default" {
- ip_address_id = "30b21801-d4b3-4174-852b-0c0f30bdbbfb"
-
- forward {
- protocol = "tcp"
- private_port = 80
- public_port = 8080
- virtual_machine_id = "f8141e2f-4e7e-4c63-9362-986c908b7ea7"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `ip_address_id` - (Required) The IP address ID for which to create the port
- forwards. Changing this forces a new resource to be created.
-
-* `managed` - (Optional) USE WITH CAUTION! If enabled all the port forwards for
- this IP address will be managed by this resource. This means it will delete
- all port forwards that are not in your config! (defaults false)
-
-* `forward` - (Required) Can be specified multiple times. Each forward block supports
- fields documented below.
-
-The `forward` block supports:
-
-* `protocol` - (Required) The name of the protocol to allow. Valid options are:
- `tcp` and `udp`.
-
-* `private_port` - (Required) The private port to forward to.
-
-* `public_port` - (Required) The public port to forward from.
-
-* `virtual_machine_id` - (Required) The ID of the virtual machine to forward to.
-
-* `vm_guest_ip` - (Optional) The virtual machine IP address for the port
- forwarding rule (useful when the virtual machine has secondary NICs
- or IP addresses).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the IP address for which the port forwards are created.
-* `vm_guest_ip` - The IP address of the virtual machine that is used
- for the port forwarding rule.
diff --git a/website/source/docs/providers/cloudstack/r/private_gateway.html.markdown b/website/source/docs/providers/cloudstack/r/private_gateway.html.markdown
deleted file mode 100644
index e8ce6ffd9..000000000
--- a/website/source/docs/providers/cloudstack/r/private_gateway.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_private_gateway"
-sidebar_current: "docs-cloudstack-resource-private-gateway"
-description: |-
- Creates a private gateway.
----
-
-# cloudstack_private_gateway
-
-Creates a private gateway for the given VPC.
-
-*NOTE: private gateway can only be created using a ROOT account!*
-
-## Example Usage
-
-```hcl
-resource "cloudstack_private_gateway" "default" {
- gateway = "10.0.0.1"
- ip_address = "10.0.0.2"
- netmask = "255.255.255.252"
- vlan = "200"
- vpc_id = "76f6e8dc-07e3-4971-b2a2-8831b0cc4cb4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `gateway` - (Required) the gateway of the Private gateway. Changing this
- forces a new resource to be created.
-
-* `ip_address` - (Required) the IP address of the Private gateway. Changing this forces
- a new resource to be created.
-
-* `netmask` - (Required) The netmask of the Private gateway. Changing
- this forces a new resource to be created.
-
-* `vlan` - (Required) The VLAN number (1-4095) the network will use.
-
-* `physical_network_id` - (Optional) The ID of the physical network this private
- gateway belongs to.
-
-* `network_offering` - (Optional) The name or ID of the network offering to use for
- the private gateways network connection.
-
-* `acl_id` - (Required) The ACL ID that should be attached to the network.
-
-* `vpc_id` - (Required) The VPC ID in which to create this Private gateway. Changing
- this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the private gateway.
diff --git a/website/source/docs/providers/cloudstack/r/secondary_ipaddress.html.markdown b/website/source/docs/providers/cloudstack/r/secondary_ipaddress.html.markdown
deleted file mode 100644
index 3a6e686b6..000000000
--- a/website/source/docs/providers/cloudstack/r/secondary_ipaddress.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_secondary_ipaddress"
-sidebar_current: "docs-cloudstack-resource-secondary-ipaddress"
-description: |-
- Assigns a secondary IP to a NIC.
----
-
-# cloudstack_secondary_ipaddress
-
-Assigns a secondary IP to a NIC.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_secondary_ipaddress" "default" {
- virtual_machine_id = "server-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `ip_address` - (Optional) The IP address to bind to the NIC. If not supplied
- an IP address will be selected randomly. Changing this forces a new resource
- to be created.
-
-* `nic_id` - (Optional) The NIC ID to which you want to attach the secondary IP
- address. Changing this forces a new resource to be created (defaults to the
- ID of the primary NIC)
-
-* `virtual_machine_id` - (Required) The ID of the virtual machine to which you
- want to attach the secondary IP address. Changing this forces a new resource
- to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The secondary IP address ID.
diff --git a/website/source/docs/providers/cloudstack/r/security_group.html.markdown b/website/source/docs/providers/cloudstack/r/security_group.html.markdown
deleted file mode 100644
index e189a4dee..000000000
--- a/website/source/docs/providers/cloudstack/r/security_group.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_security_group"
-sidebar_current: "docs-cloudstack-resource-security-group"
-description: |-
- Creates a security group.
----
-
-# cloudstack_security_group
-
-Creates a security group.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_security_group" "default" {
- name = "allow_web"
- description = "Allow access to HTTP and HTTPS"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the security group. Changing this forces a
- new resource to be created.
-
-* `description` - (Optional) The description of the security group. Changing
- this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to create this security
- group in. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the security group.
diff --git a/website/source/docs/providers/cloudstack/r/security_group_rule.html.markdown b/website/source/docs/providers/cloudstack/r/security_group_rule.html.markdown
deleted file mode 100644
index 3d1db5d86..000000000
--- a/website/source/docs/providers/cloudstack/r/security_group_rule.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_security_group_rule"
-sidebar_current: "docs-cloudstack-resource-security-group-rule"
-description: |-
- Authorizes and revokes both ingress and egress rules for a given security group.
----
-
-# cloudstack_security_group_rule
-
-Authorizes and revokes both ingress and egress rules for a given security group.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_security_group_rule" "web" {
- security_group_id = "e340b62b-fbc2-4081-8f67-e40455c44bce"
-
- rule {
- cidr_list = ["0.0.0.0/0"]
- protocol = "tcp"
- ports = ["80", "443"]
- }
-
- rule {
- cidr_list = ["192.168.0.0/24", "192.168.1.0/25"]
- protocol = "tcp"
- ports = ["80-90", "443"]
- traffic_type = "egress"
- user_security_group_list = ["group01", "group02"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `security_group_id` - (Required) The security group ID for which to create
- the rules. Changing this forces a new resource to be created.
-
-* `rule` - (Required) Can be specified multiple times. Each rule block supports
- fields documented below.
-
-The `rule` block supports:
-
-* `cidr_list` - (Optional) A CIDR list to allow access to the given ports.
-
-* `protocol` - (Required) The name of the protocol to allow. Valid options are:
- `tcp`, `udp`, `icmp`, `all` or a valid protocol number.
-
-* `icmp_type` - (Optional) The ICMP type to allow, or `-1` to allow `any`. This
- can only be specified if the protocol is ICMP. (defaults 0)
-
-* `icmp_code` - (Optional) The ICMP code to allow, or `-1` to allow `any`. This
- can only be specified if the protocol is ICMP. (defaults 0)
-
-* `ports` - (Optional) List of ports and/or port ranges to allow. This can only
- be specified if the protocol is TCP, UDP, ALL or a valid protocol number.
-
-* `traffic_type` - (Optional) The traffic type for the rule. Valid options are:
- `ingress` or `egress` (defaults ingress).
-
-* `user_security_group_list` - (Optional) A list of security groups to apply
- the rules to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The security group ID for which the rules are created.
diff --git a/website/source/docs/providers/cloudstack/r/ssh_keypair.html.markdown b/website/source/docs/providers/cloudstack/r/ssh_keypair.html.markdown
deleted file mode 100644
index 3ad2e352f..000000000
--- a/website/source/docs/providers/cloudstack/r/ssh_keypair.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_ssh_keypair"
-sidebar_current: "docs-cloudstack-resource-ssh-keypair"
-description: |-
- Creates or registers an SSH key pair.
----
-
-# cloudstack_ssh_keypair
-
-Creates or registers an SSH key pair.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_ssh_keypair" "default" {
- name = "myKey"
- public_key = "${file("~/.ssh/id_rsa.pub")}"
- project = "myProject"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the SSH key pair. This is a unique value
- within a CloudStack account. Changing this forces a new resource to be
- created.
-
-* `public_key` - (Optional) The public key to register with CloudStack. If
- this is omitted, CloudStack will generate a new key pair. The key can
- be loaded from a file on disk using the [`file()` interpolation
- function](/docs/configuration/interpolation.html#file_path_). Changing
- this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to register this
- key to. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The key pair ID.
-* `fingerprint` - The fingerprint of the public key specified or created.
-* `private_key` - The private key generated by CloudStack. Only available
- if CloudStack generated the key pair.
diff --git a/website/source/docs/providers/cloudstack/r/static_nat.html.markdown b/website/source/docs/providers/cloudstack/r/static_nat.html.markdown
deleted file mode 100644
index 74847820d..000000000
--- a/website/source/docs/providers/cloudstack/r/static_nat.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_static_nat"
-sidebar_current: "docs-cloudstack-resource-static-nat"
-description: |-
- Enables static NAT for a given IP address.
----
-
-# cloudstack_static_nat
-
-Enables static NAT for a given IP address
-
-## Example Usage
-
-```hcl
-resource "cloudstack_static_nat" "default" {
- ip_address_id = "f8141e2f-4e7e-4c63-9362-986c908b7ea7"
- virtual_machine_id = "6ca2a163-bc68-429c-adc8-ab4a620b1bb3"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `ip_address_id` - (Required) The public IP address ID for which static
- NAT will be enabled. Changing this forces a new resource to be created.
-
-* `virtual_machine_id` - (Required) The virtual machine ID to enable the
- static NAT feature for. Changing this forces a new resource to be created.
-
-* `vm_guest_ip` - (Optional) The virtual machine IP address to forward the
- static NAT traffic to (useful when the virtual machine has secondary
- NICs or IP addresses). Changing this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The static nat ID.
-* `vm_guest_ip` - The IP address of the virtual machine that is used
- to forward the static NAT traffic to.
diff --git a/website/source/docs/providers/cloudstack/r/static_route.html.markdown b/website/source/docs/providers/cloudstack/r/static_route.html.markdown
deleted file mode 100644
index dab12a959..000000000
--- a/website/source/docs/providers/cloudstack/r/static_route.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_static_route"
-sidebar_current: "docs-cloudstack-resource-static-route"
-description: |-
- Creates a static route.
----
-
-# cloudstack_static_route
-
-Creates a static route for the given private gateway or VPC.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_static_route" "default" {
- cidr = "10.0.0.0/16"
- gateway_id = "76f607e3-e8dc-4971-8831-b2a2b0cc4cb4"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cidr` - (Required) The CIDR for the static route. Changing this forces
- a new resource to be created.
-
-* `gateway_id` - (Required) The ID of the Private gateway. Changing this forces
- a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the static route.
diff --git a/website/source/docs/providers/cloudstack/r/template.html.markdown b/website/source/docs/providers/cloudstack/r/template.html.markdown
deleted file mode 100644
index 0e2d9afd1..000000000
--- a/website/source/docs/providers/cloudstack/r/template.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_template"
-sidebar_current: "docs-cloudstack-resource-template"
-description: |-
- Registers an existing template into the CloudStack cloud.
----
-
-# cloudstack_template
-
-Registers an existing template into the CloudStack cloud.
-
-## Example Usage
-
-```hcl
-resource "cloudstack_template" "centos64" {
- name = "CentOS 6.4 x64"
- format = "VHD"
- hypervisor = "XenServer"
- os_type = "CentOS 6.4 (64bit)"
- url = "http://someurl.com/template.vhd"
- zone = "zone-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the template.
-
-* `display_text` - (Optional) The display name of the template.
-
-* `format` - (Required) The format of the template. Valid values are `QCOW2`,
- `RAW`, and `VHD`.
-
-* `hypervisor` - (Required) The target hypervisor for the template. Changing
- this forces a new resource to be created.
-
-* `os_type` - (Required) The OS Type that best represents the OS of this
- template.
-
-* `url` - (Required) The URL of where the template is hosted. Changing this
- forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to create this template for.
- Changing this forces a new resource to be created.
-
-* `zone` - (Required) The name or ID of the zone where this template will be created.
- Changing this forces a new resource to be created.
-
-* `is_dynamically_scalable` - (Optional) Set to indicate if the template contains
- tools to support dynamic scaling of VM cpu/memory (defaults false)
-
-* `is_extractable` - (Optional) Set to indicate if the template is extractable
- (defaults false)
-
-* `is_featured` - (Optional) Set to indicate if the template is featured
- (defaults false)
-
-* `is_public` - (Optional) Set to indicate if the template is available for
- all accounts (defaults true)
-
-* `password_enabled` - (Optional) Set to indicate if the template should be
- password enabled (defaults false)
-
-* `is_ready_timeout` - (Optional) The maximum time in seconds to wait until the
- template is ready for use (defaults 300 seconds)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The template ID.
-* `display_text` - The display text of the template.
-* `is_dynamically_scalable` - Set to "true" if the template is dynamically scalable.
-* `is_extractable` - Set to "true" if the template is extractable.
-* `is_featured` - Set to "true" if the template is featured.
-* `is_public` - Set to "true" if the template is public.
-* `password_enabled` - Set to "true" if the template is password enabled.
-* `is_ready` - Set to "true" once the template is ready for use.
diff --git a/website/source/docs/providers/cloudstack/r/vpc.html.markdown b/website/source/docs/providers/cloudstack/r/vpc.html.markdown
deleted file mode 100644
index 5fb829861..000000000
--- a/website/source/docs/providers/cloudstack/r/vpc.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_vpc"
-sidebar_current: "docs-cloudstack-resource-vpc"
-description: |-
- Creates a VPC.
----
-
-# cloudstack_vpc
-
-Creates a VPC.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_vpc" "default" {
- name = "test-vpc"
- cidr = "10.0.0.0/16"
- vpc_offering = "Default VPC Offering"
- zone = "zone-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the VPC.
-
-* `display_text` - (Optional) The display text of the VPC.
-
-* `cidr` - (Required) The CIDR block for the VPC. Changing this forces a new
- resource to be created.
-
-* `vpc_offering` - (Required) The name or ID of the VPC offering to use for this VPC.
- Changing this forces a new resource to be created.
-
-* `network_domain` - (Optional) The default DNS domain for networks created in
- this VPC. Changing this forces a new resource to be created.
-
-* `project` - (Optional) The name or ID of the project to deploy this
- instance to. Changing this forces a new resource to be created.
-
-* `zone` - (Required) The name or ID of the zone where this disk volume will be
- available. Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPC.
-* `display_text` - The display text of the VPC.
-* `source_nat_ip` - The source NAT IP assigned to the VPC.
diff --git a/website/source/docs/providers/cloudstack/r/vpn_connection.html.markdown b/website/source/docs/providers/cloudstack/r/vpn_connection.html.markdown
deleted file mode 100644
index 7a2f70113..000000000
--- a/website/source/docs/providers/cloudstack/r/vpn_connection.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_vpn_connection"
-sidebar_current: "docs-cloudstack-resource-vpn-connection"
-description: |-
- Creates a site to site VPN connection.
----
-
-# cloudstack_vpn_connection
-
-Creates a site to site VPN connection.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_vpn_connection" "default" {
- customer_gateway_id = "8dab9381-ae73-48b8-9a3d-c460933ef5f7"
- vpn_gateway_id = "a7900060-f8a8-44eb-be15-ea54cf499703"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `customer_gateway_id` - (Required) The Customer Gateway ID to connect.
- Changing this forces a new resource to be created.
-
-* `vpn_gateway_id` - (Required) The VPN Gateway ID to connect. Changing
- this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPN Connection.
diff --git a/website/source/docs/providers/cloudstack/r/vpn_customer_gateway.html.markdown b/website/source/docs/providers/cloudstack/r/vpn_customer_gateway.html.markdown
deleted file mode 100644
index 752730bdc..000000000
--- a/website/source/docs/providers/cloudstack/r/vpn_customer_gateway.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_vpn_customer_gateway"
-sidebar_current: "docs-cloudstack-resource-vpn-customer-gateway"
-description: |-
- Creates a site to site VPN local customer gateway.
----
-
-# cloudstack_vpn_customer_gateway
-
-Creates a site to site VPN local customer gateway.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_vpn_customer_gateway" "default" {
- name = "test-vpc"
- cidr = "10.0.0.0/8"
- esp_policy = "aes256-sha1"
- gateway = "192.168.0.1"
- ike_policy = "aes256-sha1"
- ipsec_psk = "terraform"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the VPN Customer Gateway.
-
-* `cidr` - (Required) The CIDR block that needs to be routed through this gateway.
-
-* `esp_policy` - (Required) The ESP policy to use for this VPN Customer Gateway.
-
-* `gateway` - (Required) The public IP address of the related VPN Gateway.
-
-* `ike_policy` - (Required) The IKE policy to use for this VPN Customer Gateway.
-
-* `ipsec_psk` - (Required) The IPSEC pre-shared key used for this gateway.
-
-* `dpd` - (Optional) If DPD is enabled for the related VPN connection (defaults false)
-
-* `esp_lifetime` - (Optional) The ESP lifetime of phase 2 VPN connection to this
- VPN Customer Gateway in seconds (defaults 86400)
-
-* `ike_lifetime` - (Optional) The IKE lifetime of phase 2 VPN connection to this
- VPN Customer Gateway in seconds (defaults 86400)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPN Customer Gateway.
-* `dpd` - Enable or disable DPD is enabled for the related VPN connection.
-* `esp_lifetime` - The ESP lifetime of phase 2 VPN connection to this VPN Customer Gateway.
-* `ike_lifetime` - The IKE lifetime of phase 2 VPN connection to this VPN Customer Gateway.
diff --git a/website/source/docs/providers/cloudstack/r/vpn_gateway.html.markdown b/website/source/docs/providers/cloudstack/r/vpn_gateway.html.markdown
deleted file mode 100644
index d37b3793f..000000000
--- a/website/source/docs/providers/cloudstack/r/vpn_gateway.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "cloudstack"
-page_title: "CloudStack: cloudstack_vpn_gateway"
-sidebar_current: "docs-cloudstack-resource-vpn-gateway"
-description: |-
- Creates a site to site VPN local gateway.
----
-
-# cloudstack_vpn_gateway
-
-Creates a site to site VPN local gateway.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "cloudstack_vpn_gateway" "default" {
- vpc_id = "f8141e2f-4e7e-4c63-9362-986c908b7ea7"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vpc_id` - (Required) The ID of the VPC for which to create the VPN Gateway.
- Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the VPN Gateway.
-* `public_ip` - The public IP address associated with the VPN Gateway.
diff --git a/website/source/docs/providers/cobbler/index.html.markdown b/website/source/docs/providers/cobbler/index.html.markdown
deleted file mode 100644
index 7554ac854..000000000
--- a/website/source/docs/providers/cobbler/index.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "cobbler"
-page_title: "Provider: Cobbler"
-sidebar_current: "docs-cobbler-index"
-description: |-
- The Cobbler provider is used to interact with a locally installed,
- Cobbler service.
----
-
-# Cobbler Provider
-
-The Cobbler provider is used to interact with a locally installed
-[Cobbler](http://cobbler.github.io) service. The provider needs
-to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Cobbler provider
-provider "cobbler" {
- username = "${var.cobbler_username}"
- password = "${var.cobbler_password}"
- url = "${var.cobbler_url}"
-}
-
-# Create a Cobbler Distro
-resource "cobbler_distro" "ubuntu-1404-x86_64" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `username` - (Required) The username to the Cobbler service. This can
- also be specified with the `COBBLER_USERNAME` shell environment variable.
-
-* `password` - (Required) The password to the Cobbler service. This can
- also be specified with the `COBBLER_PASSWORD` shell environment variable.
-
-* `url` - (Required) The url to the Cobbler service. This can
- also be specified with the `COBBLER_URL` shell environment variable.
diff --git a/website/source/docs/providers/cobbler/r/distro.html.markdown b/website/source/docs/providers/cobbler/r/distro.html.markdown
deleted file mode 100644
index 1eea11caf..000000000
--- a/website/source/docs/providers/cobbler/r/distro.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "cobbler"
-page_title: "Cobbler: cobbler_distro"
-sidebar_current: "docs-cobbler-resource-distro"
-description: |-
- Manages a distribution within Cobbler.
----
-
-# cobbler_distro
-
-Manages a distribution within Cobbler.
-
-## Example Usage
-
-```hcl
-resource "cobbler_distro" "ubuntu-1404-x86_64" {
- name = "foo"
- breed = "ubuntu"
- os_version = "trusty"
- arch = "x86_64"
- kernel = "/var/www/cobbler/ks_mirror/Ubuntu-14.04/install/netboot/ubuntu-installer/amd64/linux"
- initrd = "/var/www/cobbler/ks_mirror/Ubuntu-14.04/install/netboot/ubuntu-installer/amd64/initrd.gz"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `arch` - (Required) The architecture of the distro. Valid options
- are: i386, x86_64, ia64, ppc, ppc64, s390, arm.
-
-* `breed` - (Required) The "breed" of distribution. Valid options
- are: redhat, fedora, centos, scientific linux, suse, debian, and
- ubuntu. These choices may vary depending on the version of Cobbler
- in use.
-
-* `boot_files` - (Optional) Files copied into tftpboot beyond the
- kernel/initrd.
-
-* `comment` - (Optional) Free form text description.
-
-* `fetchable_files` - (Optional) Templates for tftp or wget.
-
-* `kernel` - (Required) Absolute path to kernel on filesystem. This
- must already exist prior to creating the distro.
-
-* `kernel_options` - (Optional) Kernel options to use with the
- kernel.
-
-* `kernel_options_post` - (Optional) Post install Kernel options to
- use with the kernel after installation.
-
-* `initrd` - (Required) Absolute path to initrd on filesystem. This
- must already exist prior to creating the distro.
-
-* `mgmt_classes` - (Optional) Management classes for external config
- management.
-
-* `name` - (Required) A name for the distro.
-
-* `os_version` - (Required) The version of the distro you are
- creating. This varies with the version of Cobbler you are using.
- An updated signature list may need to be obtained in order to
- support a newer version. Example: `trusty`.
-
-* `owners` - (Optional) Owners list for authz_ownership.
-
-* `redhat_management_key` - (Optional) Red Hat Management key.
-
-* `redhat_management_server` - (Optional) Red Hat Management server.
-
-* `template_files` - (Optional) File mappings for built-in config
- management.
-
-## Attributes Reference
-
-All of the above Optional attributes are also exported.
-
-## Notes
-
-The path to the `kernel` and `initrd` files must exist before
-creating a Distro. Usually this involves running `cobbler import ...`
-prior to creating the Distro.
diff --git a/website/source/docs/providers/cobbler/r/kickstart_file.html.markdown b/website/source/docs/providers/cobbler/r/kickstart_file.html.markdown
deleted file mode 100644
index 450d27189..000000000
--- a/website/source/docs/providers/cobbler/r/kickstart_file.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "cobbler"
-page_title: "Cobbler: cobbler_kickstart_file"
-sidebar_current: "docs-cobbler-resource-kickstart_file"
-description: |-
- Manages a Kickstart File within Cobbler.
----
-
-# cobbler_kickstart_file
-
-Manages a Kickstart File within Cobbler.
-
-## Example Usage
-
-```hcl
-resource "cobbler_kickstart_file" "my_kickstart" {
- name = "/var/lib/cobbler/kickstarts/my_kickstart.ks"
- body = ""
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `body` - (Required) The body of the kickstart file.
-
-* `name` - (Required) The name of the kickstart file. This must be
- the full path, including `/var/lib/cobbler/kickstarts`.
diff --git a/website/source/docs/providers/cobbler/r/profile.html.markdown b/website/source/docs/providers/cobbler/r/profile.html.markdown
deleted file mode 100644
index 5ea6950f8..000000000
--- a/website/source/docs/providers/cobbler/r/profile.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "cobbler"
-page_title: "Cobbler: cobbler_profile"
-sidebar_current: "docs-cobbler-resource-profile"
-description: |-
- Manages a Profile within Cobbler.
----
-
-# cobbler_profile
-
-Manages a Profile within Cobbler.
-
-## Example Usage
-
-```hcl
-resource "cobbler_profile" "my_profile" {
- name = "/var/lib/cobbler/snippets/my_snippet"
- distro = "ubuntu-1404-x86_64"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `boot_files` - (Optional) Files copied into tftpboot beyond the
- kernel/initrd.
-
-* `comment` - (Optional) Free form text description.
-
-* `distro` - (Optional) Parent distribution.
-
-* `enable_gpxe` - (Optional) Use gPXE instead of PXELINUX for
- advanced booting options.
-
-* `enable_menu` - (Optional) Enable a boot menu.
-
-* `fetchable_files` - (Optional) Templates for tftp or wget.
-
-* `kernel_options` - (Optional) Kernel options for the profile.
-
-* `kernel_options_post` - (Optional) Post install kernel options.
-
-* `kickstart` - (Optional) The kickstart file to use.
-
-* `ks_meta` - (Optional) Kickstart metadata.
-
-* `mgmt_classes` - (Optional) For external configuration management.
-
-* `mgmt_parameters` - (Optional) Parameters which will be handed to
- your management application (Must be a valid YAML dictionary).
-
-* `name_servers_search` - (Optional) Name server search settings.
-
-* `name_servers` - (Optional) Name servers.
-
-* `name` - (Required) The name of the profile.
-
-* `owners` - (Optional) Owners list for authz_ownership.
-
-* `proxy` - (Optional) Proxy URL.
-
-* `redhat_management_key` - (Optional) Red Hat Management Key.
-
-* `redhat_management_server` - (Optional) RedHat Management Server.
-
-* `repos` - (Optional) Repos to auto-assign to this profile.
-
-* `template_files` - (Optional) File mappings for built-in config
- management.
-
-* `template_remote_kickstarts` - (Optional) remote kickstart
- templates.
-
-* `virt_auto_boot` - (Optional) Auto boot virtual machines.
-
-* `virt_bridge` - (Optional) The bridge for virtual machines.
-
-* `virt_cpus` - (Optional) The number of virtual CPUs.
-
-* `virt_file_size` - (Optional) The virtual machine file size.
-
-* `virt_path` - (Optional) The virtual machine path.
-
-* `virt_ram` - (Optional) The amount of RAM for the virtual machine.
-
-* `virt_type` - (Optional) The type of virtual machine. Valid options
- are: xenpv, xenfv, qemu, kvm, vmware, openvz.
-
-## Attributes Reference
-
-All of the above Optional attributes are also exported.
diff --git a/website/source/docs/providers/cobbler/r/snippet.html.markdown b/website/source/docs/providers/cobbler/r/snippet.html.markdown
deleted file mode 100644
index f31748f72..000000000
--- a/website/source/docs/providers/cobbler/r/snippet.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "cobbler"
-page_title: "Cobbler: cobbler_snippet"
-sidebar_current: "docs-cobbler-resource-snippet"
-description: |-
- Manages a Snippet within Cobbler.
----
-
-# cobbler_snippet
-
-Manages a Snippet within Cobbler.
-
-## Example Usage
-
-```hcl
-resource "cobbler_snippet" "my_snippet" {
- name = "/var/lib/cobbler/snippets/my_snippet"
- body = ""
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `body` - (Required) The body of the snippet.
-
-* `name` - (Required) The name of the snippet. This must be the full
- path, including `/var/lib/cobbler/snippets`.
diff --git a/website/source/docs/providers/cobbler/r/system.html.markdown b/website/source/docs/providers/cobbler/r/system.html.markdown
deleted file mode 100644
index 15ee6d542..000000000
--- a/website/source/docs/providers/cobbler/r/system.html.markdown
+++ /dev/null
@@ -1,189 +0,0 @@
----
-layout: "cobbler"
-page_title: "Cobbler: cobbler_system"
-sidebar_current: "docs-cobbler-resource-system"
-description: |-
- Manages a System within Cobbler.
----
-
-# cobbler_system
-
-Manages a System within Cobbler.
-
-## Example Usage
-
-```hcl
-resource "cobbler_system" "my_system" {
- name = "my_system"
- profile = "${cobbler_profile.my_profile.name}"
- name_servers = ["8.8.8.8", "8.8.4.4"]
- comment = "I'm a system"
-
- interface {
- name = "eth0"
- mac_address = "aa:bb:cc:dd:ee:ff"
- static = true
- ip_address = "1.2.3.4"
- netmask = "255.255.255.0"
- }
-
- interface {
- name = "eth1"
- mac_address = "aa:bb:cc:dd:ee:fa"
- static = true
- ip_address = "1.2.3.5"
- netmask = "255.255.255.0"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `boot_files` - (Optional) TFTP boot files copied into tftpboot.
-
-* `comment` - (Optional) Free form text description
-
-* `enable_gpxe` - (Optional) Use gPXE instead of PXELINUX.
-
-* `fetchable_files` - (Optional) Templates for tftp or wget.
-
-* `gateway` - (Optional) Network gateway.
-
-* `hostname` - (Optional) Hostname of the system.
-
-* `image` - (Optional) Parent image (if no profile is used).
-
-* `interface` - (Optional)
-
-* `ipv6_default_device` - (Optional) IPv6 default device.
-
-* `kernel_options` - (Optional) Kernel options.
- ex: selinux=permissive.
-
-* `kernel_options_post` - (Optional) Kernel options (post install).
-
-* `kickstart` - (Optional) Path to kickstart template.
-
-* `ks_meta` - (Optional) Kickstart metadata.
-
-* `ldap_enabled` - (Optional) Configure LDAP at next config update.
-
-* `ldap_type` - (Optional) LDAP management type.
-
-* `mgmt_classes` - (Optional) Management classes for external config
- management.
-* `mgmt_parameters` - (Optional) Parameters which will be handed to
- your management application. Must be a valid YAML dictionary.
-
-* `monit_enabled` - (Optional) Configure monit on this machine at
- next config update.
-
-* `name_servers_search` - (Optional) Name servers search path.
-
-* `name_servers` - (Optional) Name servers.
-
-* `name` - (Required) The name of the system.
-
-* `netboot_enabled` - (Optional) (re)Install this machine at next
- boot.
-
-* `owners` - (Optional) Owners list for authz_ownership.
-
-* `power_address` - (Optional) Power management address.
-
-* `power_id` - (Optional) Usually a plug number or blade name if
- power type requires it.
-
-* `power_pass` - (Optional) Power management password.
-
-* `power_type` - (Optional) Power management type.
-
-* `power_user` - (Optional) Power management user.
-
-* `profile` - (Required) Parent profile.
-
-* `proxy` - (Optional) Proxy URL.
-
-* `redhat_management_key` - (Optional) Red Hat management key.
-
-* `redhat_management_server` - (Optional) Red Hat management server.
-
-* `status` - (Optional) System status (development, testing,
- acceptance, production).
-
-* `template_files` - (Optional) File mappings for built-in
- configuration management.
-
-* `template_remote_kickstarts` - (Optional) template remote
- kickstarts.
-
-* `virt_auto_boot` - (Optional) Auto boot the VM.
-
-* `virt_cpus` - (Optional) Number of virtual CPUs in the VM.
-
-* `virt_disk_driver` - (Optional) The on-disk format for the
- virtualization disk.
-
-* `virt_file_size` - (Optional) Virt file size.
-
-* `virt_path` - (Optional) Path to the VM.
-
-* `virt_pxe_boot` - (Optional) Use PXE to build this VM?
-
-* `virt_ram` - (Optional) The amount of RAM for the VM.
-
-* `virt_type` - (Optional) Virtualization technology to use: xenpv,
- xenfv, qemu, kvm, vmware, openvz.
-
-The `interface` block supports:
-
-* `name` - (Required) The device name of the interface. ex: eth0.
-
-* `cnames` - (Optional) Canonical name records.
-
-* `dhcp_tag` - (Optional) DHCP tag.
-
-* `dns_name` - (Optional) DNS name.
-
-* `bonding_opts` - (Optional) Options for bonded interfaces.
-
-* `bridge_opts` - (Optional) Options for bridge interfaces.
-
-* `gateway` - (Optional) Per-interface gateway.
-
-* `interface_type` - (Optional) The type of interface: na, master,
- slave, bond, bond_slave, bridge, bridge_slave, bonded_bridge_slave.
-
-* `interface_master` - (Optional) The master interface when slave.
-
-* `ip_address` - (Optional) The IP address of the interface.
-
-* `ipv6_address` - (Optional) The IPv6 address of the interface.
-
-* `ipv6_mtu` - (Optional) The MTU of the IPv6 address.
-
-* `ipv6_static_routes` - (Optional) Static routes for the IPv6
- interface.
-
-* `ipv6_default_gateway` - (Optional) The default gateawy for the
- IPv6 address / interface.
-
-* `mac_address` - (Optional) The MAC address of the interface.
-
-* `management` - (Optional) Whether this interface is a management
- interface.
-
-* `netmask` - (Optional) The IPv4 netmask of the interface.
-
-* `static` - (Optional) Whether the interface should be static or
- DHCP.
-
-* `static_routes` - (Optional) Static routes for the interface.
-
-* `virt_bridge` - (Optional) The virtual bridge to attach to.
-
-## Attribute Reference
-
-All optional attributes listed above are also exported.
diff --git a/website/source/docs/providers/consul/d/agent_self.html.markdown b/website/source/docs/providers/consul/d/agent_self.html.markdown
deleted file mode 100644
index 9d0f5061d..000000000
--- a/website/source/docs/providers/consul/d/agent_self.html.markdown
+++ /dev/null
@@ -1,162 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_agent_self"
-sidebar_current: "docs-consul-data-source-agent-self"
-description: |-
- Provides the configuration information of the local Consul agent.
----
-
-# consul_agent__self
-
-The `consul_agent_self` data source returns
-[configuration and status data](https://www.consul.io/docs/agent/http/agent.html#agent_self)
-from the agent specified in the `provider`.
-
-## Example Usage
-
-```hcl
-data "consul_agent_self" "read-dc1-agent" {
- query_options {
- # Optional parameter: implicitly uses the current datacenter of the agent
- datacenter = "dc1"
- }
-}
-
-# Set the description to a whitespace delimited list of the services
-resource "example_resource" "app" {
- description = "Consul datacenter ${data.consul_agent_self.read-dc1-agent.datacenter}"
-
- # ...
-}
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* [`acl_datacenter`](https://www.consul.io/docs/agent/options.html#acl_datacenter)
-* [`acl_default_policy`](https://www.consul.io/docs/agent/options.html#acl_default_policy)
-* `acl_disabled_ttl`
-* [`acl_down_policy`](https://www.consul.io/docs/agent/options.html#acl_down_policy)
-* [`acl_enforce_0_8_semantics`](https://www.consul.io/docs/agent/options.html#acl_enforce_version_8)
-* [`acl_ttl`](https://www.consul.io/docs/agent/options.html#acl_ttl)
-* [`addresses`](https://www.consul.io/docs/agent/options.html#addresses)
-* [`advertise_addr`](https://www.consul.io/docs/agent/options.html#_advertise)
-* [`advertise_addr_wan`](https://www.consul.io/docs/agent/options.html#_advertise-wan)
-* [`advertise_addrs`](https://www.consul.io/docs/agent/options.html#advertise_addrs)
-* [`atlas_join`](https://www.consul.io/docs/agent/options.html#_atlas_join)
-* [`bind_addr`](https://www.consul.io/docs/agent/options.html#_bind)
-* [`bootstrap_expect`](https://www.consul.io/docs/agent/options.html#_bootstrap_expect)
-* [`bootstrap_mode`](https://www.consul.io/docs/agent/options.html#_bootstrap)
-* `check_deregister_interval_min`
-* `check_reap_interval`
-* [`check_update_interval`](https://www.consul.io/docs/agent/options.html#check_update_interval)
-* [`client_addr`](https://www.consul.io/docs/agent/options.html#_client)
-* `dns` - A map of DNS configuration attributes. See below for details on the
- contents of the `dns` attribute.
-* [`dns_recursors`](https://www.consul.io/docs/agent/options.html#recursors) - A
- list of all DNS recursors.
-* [`data_dir`](https://www.consul.io/docs/agent/options.html#_data_dir)
-* [`datacenter`](https://www.consul.io/docs/agent/options.html#_datacenter)
-* [`dev_mode`](https://www.consul.io/docs/agent/options.html#_dev)
-* [`domain`](https://www.consul.io/docs/agent/options.html#_domain)
-* [`enable_anonymous_signature`](https://www.consul.io/docs/agent/options.html#disable_anonymous_signature)
-* `enable_coordinates`
-* [`enable_debug`](https://www.consul.io/docs/agent/options.html#enable_debug)
-* [`enable_remote_exec`](https://www.consul.io/docs/agent/options.html#disable_remote_exec)
-* [`enable_syslog`](https://www.consul.io/docs/agent/options.html#_syslog)
-* [`enable_ui`](https://www.consul.io/docs/agent/options.html#_ui)
-* [`enable_update_check`](https://www.consul.io/docs/agent/options.html#disable_update_check)
-* [`id`](https://www.consul.io/docs/agent/options.html#_node_id)
-* [`leave_on_int`](https://www.consul.io/docs/agent/options.html#skip_leave_on_interrupt)
-* [`leave_on_term`](https://www.consul.io/docs/agent/options.html#leave_on_terminate)
-* [`log_level`](https://www.consul.io/docs/agent/options.html#_log_level)
-* [`name`](https://www.consul.io/docs/agent/options.html#_node)
-* [`performance`](https://www.consul.io/docs/agent/options.html#performance)
-* [`pid_file`](https://www.consul.io/docs/agent/options.html#_pid_file)
-* [`ports`](https://www.consul.io/docs/agent/options.html#ports)
-* [`protocol_version`](https://www.consul.io/docs/agent/options.html#_protocol)
-* [`reconnect_timeout_lan`](https://www.consul.io/docs/agent/options.html#reconnect_timeout)
-* [`reconnect_timeout_wan`](https://www.consul.io/docs/agent/options.html#reconnect_timeout_wan)
-* [`rejoin_after_leave`](https://www.consul.io/docs/agent/options.html#_rejoin)
-* [`retry_join`](https://www.consul.io/docs/agent/options.html#retry_join)
-* [`retry_join_ec2`](https://www.consul.io/docs/agent/options.html#retry_join_ec2) -
- A map of EC2 retry attributes. See below for details on the available
- information.
-* [`retry_join_gce`](https://www.consul.io/docs/agent/options.html#retry_join_gce) -
- A map of GCE retry attributes. See below for details on the available
- information.
-* [`retry_join_wan`](https://www.consul.io/docs/agent/options.html#_retry_join_wan)
-* [`retry_max_attempts`](https://www.consul.io/docs/agent/options.html#_retry_max)
-* [`retry_max_attempts_wan`](https://www.consul.io/docs/agent/options.html#_retry_max_wan)
-* [`serf_lan_bind_addr`](https://www.consul.io/docs/agent/options.html#_serf_lan_bind)
-* [`serf_wan_bind_addr`](https://www.consul.io/docs/agent/options.html#_serf_wan_bind)
-* [`server_mode`](https://www.consul.io/docs/agent/options.html#_server)
-* [`server_name`](https://www.consul.io/docs/agent/options.html#server_name)
-* [`session_ttl_min`](https://www.consul.io/docs/agent/options.html#session_ttl_min)
-* [`start_join`](https://www.consul.io/docs/agent/options.html#start_join)
-* [`start_join_wan`](https://www.consul.io/docs/agent/options.html#start_join_wan)
-* [`syslog_facility`](https://www.consul.io/docs/agent/options.html#syslog_facility)
-* [`tls_ca_file`](https://www.consul.io/docs/agent/options.html#ca_file)
-* [`tls_cert_file`](https://www.consul.io/docs/agent/options.html#cert_file)
-* [`tls_key_file`](https://www.consul.io/docs/agent/options.html#key_file)
-* [`tls_min_version`](https://www.consul.io/docs/agent/options.html#tls_min_version)
-* [`tls_verify_incoming`](https://www.consul.io/docs/agent/options.html#verify_incoming)
-* [`tls_verify_outgoing`](https://www.consul.io/docs/agent/options.html#verify_outgoing)
-* [`tls_verify_server_hostname`](https://www.consul.io/docs/agent/options.html#verify_server_hostname)
-* [`tagged_addresses`](https://www.consul.io/docs/agent/options.html#translate_wan_addrs)
-* [`telemetry`](https://www.consul.io/docs/agent/options.html#telemetry) - A map
- of telemetry configuration.
-* [`translate_wan_addrs`](https://www.consul.io/docs/agent/options.html#translate_wan_addrs)
-* [`ui_dir`](https://www.consul.io/docs/agent/options.html#ui_dir)
-* [`unix_sockets`](https://www.consul.io/docs/agent/options.html#unix_sockets)
-* `version` - The version of the Consul agent.
-* `version_prerelease`
-* `version_revision`
-
-### DNS Attributes
-
-* [`allow_stale`](https://www.consul.io/docs/agent/options.html#allow_stale)
-* [`enable_compression`](https://www.consul.io/docs/agent/options.html#disable_compression)
-* [`enable_truncate`](https://www.consul.io/docs/agent/options.html#enable_truncate)
-* [`max_stale`](https://www.consul.io/docs/agent/options.html#max_stale)
-* [`node_ttl`](https://www.consul.io/docs/agent/options.html#node_ttl)
-* [`only_passing`](https://www.consul.io/docs/agent/options.html#only_passing)
-* [`recursor_timeout`](https://www.consul.io/docs/agent/options.html#recursor_timeout)
-* [`service_ttl`](https://www.consul.io/docs/agent/options.html#service_ttl)
-* [`udp_answer_limit`](https://www.consul.io/docs/agent/options.html#udp_answer_limit)
-
-### Retry Join EC2 Attributes
-
-* [`region`](https://www.consul.io/docs/agent/options.html#region)
-* [`tag_key`](https://www.consul.io/docs/agent/options.html#tag_key)
-* [`tag_value`](https://www.consul.io/docs/agent/options.html#tag_value)
-
-### Retry Join GCE Attributes
-
-* [`credentials_file`](https://www.consul.io/docs/agent/options.html#credentials_file)
-* [`project_name`](https://www.consul.io/docs/agent/options.html#project_name)
-* [`tag_value`](https://www.consul.io/docs/agent/options.html#tag_value)
-* [`zone_pattern`](https://www.consul.io/docs/agent/options.html#zone_pattern)
-
-### Telemetry Attributes
-
-* [`circonus_api_app`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_api_app)
-* [`circonus_api_token`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_api_token)
-* [`circonus_api_url`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_api_url)
-* [`circonus_broker_id`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_broker_id)
-* [`circonus_check_id`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_id)
-* [`circonus_check_tags`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_tags)
-* [`circonus_display_name`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_display_name)
-* [`circonus_force_metric_activation`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_force_metric_activation)
-* [`circonus_instance_id`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_instance_id)
-* [`circonus_search_tag`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_check_search_tag)
-* [`circonus_select_tag`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_broker_select_tag)
-* [`circonus_submission_interval`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_submission_interval)
-* [`circonus_submission_url`](https://www.consul.io/docs/agent/options.html#telemetry-circonus_submission_url)
-* [`dogstatsd_addr`](https://www.consul.io/docs/agent/options.html#telemetry-dogstatsd_addr)
-* [`dogstatsd_tags`](https://www.consul.io/docs/agent/options.html#telemetry-dogstatsd_tags)
-* [`enable_hostname`](https://www.consul.io/docs/agent/options.html#telemetry-disable_hostname)
-* [`statsd_addr`](https://www.consul.io/docs/agent/options.html#telemetry-statsd_address)
-* [`statsite_addr`](https://www.consul.io/docs/agent/options.html#telemetry-statsite_address)
-* [`statsite_prefix`](https://www.consul.io/docs/agent/options.html#telemetry-statsite_prefix)
diff --git a/website/source/docs/providers/consul/d/keys.html.markdown b/website/source/docs/providers/consul/d/keys.html.markdown
deleted file mode 100644
index 42e2528dc..000000000
--- a/website/source/docs/providers/consul/d/keys.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_keys"
-sidebar_current: "docs-consul-data-source-keys"
-description: |-
- Reads values from the Consul key/value store.
----
-
-# consul_keys
-
-The `consul_keys` resource reads values from the Consul key/value store.
-This is a powerful way dynamically set values in templates.
-
-## Example Usage
-
-```hcl
-data "consul_keys" "app" {
- datacenter = "nyc1"
- token = "abcd"
-
- # Read the launch AMI from Consul
- key {
- name = "ami"
- path = "service/app/launch_ami"
- default = "ami-1234"
- }
-}
-
-# Start our instance with the dynamic ami value
-resource "aws_instance" "app" {
- ami = "${data.consul_keys.app.var.ami}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The datacenter to use. This overrides the
- datacenter in the provider setup and the agent's default datacenter.
-
-* `token` - (Optional) The ACL token to use. This overrides the
- token that the agent provides by default.
-
-* `key` - (Required) Specifies a key in Consul to be read or written.
- Supported values documented below.
-
-The `key` block supports the following:
-
-* `name` - (Required) This is the name of the key. This value of the
- key is exposed as `var.`. This is not the path of the key
- in Consul.
-
-* `path` - (Required) This is the path in Consul that should be read
- or written to.
-
-* `default` - (Optional) This is the default value to set for `var.`
- if the key does not exist in Consul. Defaults to an empty string.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being read from to.
-* `var.` - For each name given, the corresponding attribute
- has the value of the key.
diff --git a/website/source/docs/providers/consul/d/nodes.html.markdown b/website/source/docs/providers/consul/d/nodes.html.markdown
deleted file mode 100644
index 82babcbca..000000000
--- a/website/source/docs/providers/consul/d/nodes.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_catalog_nodes"
-sidebar_current: "docs-consul-data-source-catalog-nodes"
-description: |-
- Provides a list of nodes in a given Consul datacenter.
----
-
-# consul_catalog_nodes
-
-The `consul_catalog_nodes` data source returns a list of Consul nodes that have
-been registered with the Consul cluster in a given datacenter. By specifying a
-different datacenter in the `query_options` it is possible to retrieve a list of
-nodes from a different WAN-attached Consul datacenter.
-
-## Example Usage
-
-```hcl
-data "consul_catalog_nodes" "read-dc1-nodes" {
- query_options {
- # Optional parameter: implicitly uses the current datacenter of the agent
- datacenter = "dc1"
- }
-}
-
-# Set the description to a whitespace delimited list of the node names
-resource "example_resource" "app" {
- description = "${join(" ", formatlist("%s", data.consul_catalog_nodes.node_names))}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The Consul datacenter to query. Defaults to the
- same value found in `query_options` parameter specified below, or if that is
- empty, the `datacenter` value found in the Consul agent that this provider is
- configured to talk to.
-
-* `query_options` - (Optional) See below.
-
-The `query_options` block supports the following:
-
-* `allow_stale` - (Optional) When `true`, the default, allow responses from
- Consul servers that are followers.
-
-* `require_consistent` - (Optional) When `true` force the client to perform a
- read on at least quorum servers and verify the result is the same. Defaults
- to `false`.
-
-* `token` - (Optional) Specify the Consul ACL token to use when performing the
- request. This defaults to the same API token configured by the `consul`
- provider but may be overriden if necessary.
-
-* `wait_index` - (Optional) Index number used to enable blocking quereis.
-
-* `wait_time` - (Optional) Max time the client should wait for a blocking query
- to return.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being read from to.
-* `node_ids` - A list of the Consul node IDs.
-* `node_names` - A list of the Consul node names.
-* `nodes` - A list of nodes and details about each Consul agent. The list of
- per-node attributes is detailed below.
-
-The following is a list of the per-node attributes contained within the `nodes`
-map:
-
-* `id` - The Node ID of the Consul agent.
-* [`meta`](https://www.consul.io/docs/agent/http/catalog.html#Meta) - Node meta
- data tag information, if any.
-* [`name`](https://www.consul.io/docs/agent/http/catalog.html#Node) - The name
- of the Consul node.
-* [`address`](https://www.consul.io/docs/agent/http/catalog.html#Address) - The
- IP address the node is advertising to the Consul cluster.
-* [`tagged_addresses`](https://www.consul.io/docs/agent/http/catalog.html#TaggedAddresses) -
- List of explicit LAN and WAN IP addresses for the agent.
diff --git a/website/source/docs/providers/consul/d/service.html.markdown b/website/source/docs/providers/consul/d/service.html.markdown
deleted file mode 100644
index acd74829f..000000000
--- a/website/source/docs/providers/consul/d/service.html.markdown
+++ /dev/null
@@ -1,113 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_catalog_service"
-sidebar_current: "docs-consul-data-source-catalog-service"
-description: |-
- Provides details about a specific Consul service
----
-
-# consul_catalog_service
-
-`consul_catalog_service` provides details about a specific Consul service in a
-given datacenter. The results include a list of nodes advertising the specified
-service, the node's IP address, port number, node ID, etc. By specifying a
-different datacenter in the `query_options` it is possible to retrieve a list of
-services from a different WAN-attached Consul datacenter.
-
-This data source is different from the `consul_catalog_services` (plural) data
-source, which provides a summary of the current Consul services.
-
-## Example Usage
-
-```hcl
-data "consul_catalog_service" "read-consul-dc1" {
- query_options {
- # Optional parameter: implicitly uses the current datacenter of the agent
- datacenter = "dc1"
- }
-
- name = "consul"
-}
-
-# Set the description to a whitespace delimited list of the node names
-resource "example_resource" "app" {
- description = "${join(" ", data.consul_catalog_service.nodes)}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The Consul datacenter to query. Defaults to the
- same value found in `query_options` parameter specified below, or if that is
- empty, the `datacenter` value found in the Consul agent that this provider is
- configured to talk to.
-
-* `name` - (Required) The service name to select.
-
-* `query_options` - (Optional) See below.
-
-* `tag` - (Optional) A single tag that can be used to filter the list of nodes
- to return based on a single matching tag..
-
-The `query_options` block supports the following:
-
-* `allow_stale` - (Optional) When `true`, the default, allow responses from
- Consul servers that are followers.
-
-* `require_consistent` - (Optional) When `true` force the client to perform a
- read on at least quorum servers and verify the result is the same. Defaults
- to `false`.
-
-* `token` - (Optional) Specify the Consul ACL token to use when performing the
- request. This defaults to the same API token configured by the `consul`
- provider but may be overriden if necessary.
-
-* `wait_index` - (Optional) Index number used to enable blocking quereis.
-
-* `wait_time` - (Optional) Max time the client should wait for a blocking query
- to return.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being read from to.
-* `name` - The name of the service
-* `tag` - The name of the tag used to filter the list of nodes in `service`.
-* `service` - A list of nodes and details about each endpoint advertising a
- service. Each element in the list is a map of attributes that correspond to
- each individual node. The list of per-node attributes is detailed below.
-
-The following is a list of the per-node `service` attributes:
-
-* [`create_index`](https://www.consul.io/docs/agent/http/catalog.html#CreateIndex) -
- The index entry at which point this entry was added to the catalog.
-* [`modify_index`](https://www.consul.io/docs/agent/http/catalog.html#ModifyIndex) -
- The index entry at which point this entry was modified in the catalog.
-* [`node_address`](https://www.consul.io/docs/agent/http/catalog.html#Address) -
- The address of the Consul node advertising the service.
-* `node_id` - The Node ID of the Consul agent advertising the service.
-* [`node_meta`](https://www.consul.io/docs/agent/http/catalog.html#Meta) - Node
- meta data tag information, if any.
-* [`node_name`](https://www.consul.io/docs/agent/http/catalog.html#Node) - The
- name of the Consul node.
-* [`address`](https://www.consul.io/docs/agent/http/catalog.html#ServiceAddress) -
- The IP address of the service. If the `ServiceAddress` in the Consul catalog
- is empty, this value is automatically populated with the `node_address` (the
- `Address` in the Consul Catalog).
-* [`enable_tag_override`](https://www.consul.io/docs/agent/http/catalog.html#ServiceEnableTagOverride) -
- Whether service tags can be overridden on this service.
-* [`id`](https://www.consul.io/docs/agent/http/catalog.html#ServiceID) - A
- unique service instance identifier.
-* [`name`](https://www.consul.io/docs/agent/http/catalog.html#ServiceName) - The
- name of the service.
-* [`port`](https://www.consul.io/docs/agent/http/catalog.html#ServicePort) -
- Port number of the service.
-* [`tagged_addresses`](https://www.consul.io/docs/agent/http/catalog.html#TaggedAddresses) -
- List of explicit LAN and WAN IP addresses for the agent.
-* [`tags`](https://www.consul.io/docs/agent/http/catalog.html#ServiceTags) -
- List of tags for the service.
diff --git a/website/source/docs/providers/consul/d/services.html.markdown b/website/source/docs/providers/consul/d/services.html.markdown
deleted file mode 100644
index dcfcee3d5..000000000
--- a/website/source/docs/providers/consul/d/services.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_catalog_services"
-sidebar_current: "docs-consul-data-source-catalog-services"
-description: |-
- Provides a list of services in a given Consul datacenter.
----
-
-# consul_catalog_services
-
-The `consul_catalog_services` data source returns a list of Consul services that
-have been registered with the Consul cluster in a given datacenter. By
-specifying a different datacenter in the `query_options` it is possible to
-retrieve a list of services from a different WAN-attached Consul datacenter.
-
-This data source is different from the `consul_catalog_service` (singular) data
-source, which provides a detailed response about a specific Consul service.
-
-## Example Usage
-
-```hcl
-data "consul_catalog_services" "read-dc1" {
- query_options {
- # Optional parameter: implicitly uses the current datacenter of the agent
- datacenter = "dc1"
- }
-}
-
-# Set the description to a whitespace delimited list of the services
-resource "example_resource" "app" {
- description = "${join(" ", data.consul_catalog_services.names)}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The Consul datacenter to query. Defaults to the
- same value found in `query_options` parameter specified below, or if that is
- empty, the `datacenter` value found in the Consul agent that this provider is
- configured to talk to.
-
-* `query_options` - (Optional) See below.
-
-The `query_options` block supports the following:
-
-* `allow_stale` - (Optional) When `true`, the default, allow responses from
- Consul servers that are followers.
-
-* `require_consistent` - (Optional) When `true` force the client to perform a
- read on at least quorum servers and verify the result is the same. Defaults
- to `false`.
-
-* `token` - (Optional) Specify the Consul ACL token to use when performing the
- request. This defaults to the same API token configured by the `consul`
- provider but may be overriden if necessary.
-
-* `wait_index` - (Optional) Index number used to enable blocking quereis.
-
-* `wait_time` - (Optional) Max time the client should wait for a blocking query
- to return.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being read from to.
-* `names` - A list of the Consul services found. This will always contain the
- list of services found.
-* `services.` - For each name given, the corresponding attribute is a
- Terraform map of services and their tags. The value is an alphanumerically
- sorted, whitespace delimited set of tags associated with the service.
-* `tags` - A map of the tags found for each service. If more than one service
- shares the same tag, unique service names will be joined by whitespace (this
- is the inverse of `services` and can be used to lookup the services that match
- a single tag).
diff --git a/website/source/docs/providers/consul/index.html.markdown b/website/source/docs/providers/consul/index.html.markdown
deleted file mode 100644
index a03a927c7..000000000
--- a/website/source/docs/providers/consul/index.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "consul"
-page_title: "Provider: Consul"
-sidebar_current: "docs-consul-index"
-description: |-
- Consul is a tool for service discovery, configuration and orchestration. The Consul provider exposes resources used to interact with a Consul cluster. Configuration of the provider is optional, as it provides defaults for all arguments.
----
-
-# Consul Provider
-
-[Consul](https://www.consul.io) is a tool for service discovery, configuration
-and orchestration. The Consul provider exposes resources used to interact with a
-Consul cluster. Configuration of the provider is optional, as it provides
-defaults for all arguments.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Consul provider
-provider "consul" {
- address = "demo.consul.io:80"
- datacenter = "nyc1"
-}
-
-# Access a key in Consul
-resource "consul_keys" "app" {
- key {
- name = "ami"
- path = "service/app/launch_ami"
- default = "ami-1234"
- }
-}
-
-# Use our variable from Consul
-resource "aws_instance" "app" {
- ami = "${consul_keys.app.var.ami}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Optional) The HTTP(S) API address of the agent to use. Defaults to "127.0.0.1:8500".
-* `scheme` - (Optional) The URL scheme of the agent to use ("http" or "https"). Defaults to "http".
-* `http_auth` - (Optional) HTTP Basic Authentication credentials to be used when communicating with Consul, in the format of either `user` or `user:pass`. This may also be specified using the `CONSUL_HTTP_AUTH` environment variable.
-* `datacenter` - (Optional) The datacenter to use. Defaults to that of the agent.
-* `token` - (Optional) The ACL token to use by default when making requests to the agent.
-* `ca_file` - (Optional) A path to a PEM-encoded certificate authority used to verify the remote agent's certificate.
-* `cert_file` - (Optional) A path to a PEM-encoded certificate provided to the remote agent; requires use of `key_file`.
-* `key_file`- (Optional) A path to a PEM-encoded private key, required if `cert_file` is specified.
diff --git a/website/source/docs/providers/consul/r/agent_service.html.markdown b/website/source/docs/providers/consul/r/agent_service.html.markdown
deleted file mode 100644
index c3538f602..000000000
--- a/website/source/docs/providers/consul/r/agent_service.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_agent_service"
-sidebar_current: "docs-consul-resource-agent-service"
-description: |-
- Provides access to Agent Service data in Consul. This can be used to define a service associated with a particular agent. Currently, defining health checks for an agent service is not supported.
----
-
-# consul_agent_service
-
-Provides access to the agent service data in Consul. This can be used to
-define a service associated with a particular agent. Currently, defining
-health checks for an agent service is not supported.
-
-## Example Usage
-
-```hcl
-resource "consul_agent_service" "app" {
- address = "www.google.com"
- name = "google"
- port = 80
- tags = ["tag0", "tag1"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Optional) The address of the service. Defaults to the
- address of the agent.
-
-* `name` - (Required) The name of the service.
-
-* `port` - (Optional) The port of the service.
-
-* `tags` - (Optional) A list of values that are opaque to Consul,
- but can be used to distinguish between services or nodes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `address` - The address of the service.
-* `id` - The ID of the service, defaults to the value of `name`.
-* `name` - The name of the service.
-* `port` - The port of the service.
-* `tags` - The tags of the service.
diff --git a/website/source/docs/providers/consul/r/catalog_entry.html.markdown b/website/source/docs/providers/consul/r/catalog_entry.html.markdown
deleted file mode 100644
index e63d21c8d..000000000
--- a/website/source/docs/providers/consul/r/catalog_entry.html.markdown
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_catalog_entry"
-sidebar_current: "docs-consul-resource-catalog-entry"
-description: |-
- Registers a node or service with the Consul Catalog. Currently, defining health checks is not supported.
----
-
-# consul_catalog_entry
-
-Registers a node or service with the [Consul Catalog](https://www.consul.io/docs/agent/http/catalog.html#catalog_register).
-Currently, defining health checks is not supported.
-
-## Example Usage
-
-```hcl
-resource "consul_catalog_entry" "app" {
- address = "192.168.10.10"
- node = "foobar"
-
- service = {
- address = "127.0.0.1"
- id = "redis1"
- name = "redis"
- port = 8000
- tags = ["master", "v1"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Required) The address of the node being added to,
- or referenced in the catalog.
-
-* `node` - (Required) The name of the node being added to, or
- referenced in the catalog.
-
-* `service` - (Optional) A service to optionally associated with
- the node. Supported values are documented below.
-
-* `datacenter` - (Optional) The datacenter to use. This overrides the
- datacenter in the provider setup and the agent's default datacenter.
-
-* `token` - (Optional) ACL token.
-
-The `service` block supports the following:
-
-* `address` - (Optional) The address of the service. Defaults to the
- node address.
-* `id` - (Optional) The ID of the service. Defaults to the `name`.
-* `name` - (Required) The name of the service
-* `port` - (Optional) The port of the service.
-* `tags` - (Optional) A list of values that are opaque to Consul,
- but can be used to distinguish between services or nodes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `address` - The address of the service.
-* `node` - The ID of the service, defaults to the value of `name`.
diff --git a/website/source/docs/providers/consul/r/key_prefix.html.markdown b/website/source/docs/providers/consul/r/key_prefix.html.markdown
deleted file mode 100644
index 86abfd29e..000000000
--- a/website/source/docs/providers/consul/r/key_prefix.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_key_prefix"
-sidebar_current: "docs-consul-resource-key-prefix"
-description: |-
- Allows Terraform to manage a namespace of Consul keys that share a
- common name prefix.
----
-
-# consul_key_prefix
-
-Allows Terraform to manage a "namespace" of Consul keys that share a common
-name prefix.
-
-Like `consul_keys`, this resource can write values into the Consul key/value
-store, but *unlike* `consul_keys` this resource can detect and remove extra
-keys that have been added some other way, thus ensuring that rogue data
-added outside of Terraform will be removed on the next run.
-
-This resource is thus useful in the case where Terraform is exclusively
-managing a set of related keys.
-
-To avoid accidentally clobbering matching data that existed in Consul before
-a `consul_key_prefix` resource was created, creation of a key prefix instance
-will fail if any matching keys are already present in the key/value store.
-If any conflicting data is present, you must first delete it manually.
-
-~> **Warning** After this resource is instantiated, Terraform takes control
-over *all* keys with the given path prefix, and will remove any matching keys
-that are not present in the configuration. It will also delete *all* keys under
-the given prefix when a `consul_key_prefix` resource is destroyed, even if
-those keys were created outside of Terraform.
-
-## Example Usage
-
-```hcl
-resource "consul_key_prefix" "myapp_config" {
- datacenter = "nyc1"
- token = "abcd"
-
- # Prefix to add to prepend to all of the subkey names below.
- path_prefix = "myapp/config/"
-
- subkeys = {
- "elb_cname" = "${aws_elb.app.dns_name}"
- "s3_bucket_name" = "${aws_s3_bucket.app.bucket}"
- "database/hostname" = "${aws_db_instance.app.address}"
- "database/port" = "${aws_db_instance.app.port}"
- "database/username" = "${aws_db_instance.app.username}"
- "database/password" = "${aws_db_instance.app.password}"
- "database/name" = "${aws_db_instance.app.name}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The datacenter to use. This overrides the
- datacenter in the provider setup and the agent's default datacenter.
-
-* `token` - (Optional) The ACL token to use. This overrides the
- token that the agent provides by default.
-
-* `path_prefix` - (Required) Specifies the common prefix shared by all keys
- that will be managed by this resource instance. In most cases this will
- end with a slash, to manage a "folder" of keys.
-
-* `subkeys` - (Required) A mapping from subkey name (which will be appended
- to the given `path_prefix`) to the value that should be stored at that key.
- Use slashes, as shown in the above example, to create "sub-folders" under
- the given path prefix.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being read/written to.
diff --git a/website/source/docs/providers/consul/r/keys.html.markdown b/website/source/docs/providers/consul/r/keys.html.markdown
deleted file mode 100644
index d942dbd4a..000000000
--- a/website/source/docs/providers/consul/r/keys.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_keys"
-sidebar_current: "docs-consul-resource-keys"
-description: |-
- Writes values into the Consul key/value store.
----
-
-# consul_keys
-
-The `consul_keys` resource writes sets of individual values into Consul.
-This is a powerful way to expose infrastructure details to clients.
-
-This resource manages individual keys, and thus it can create, update
-and delete the keys explicitly given. However, it is not able to detect
-and remove additional keys that have been added by non-Terraform means.
-To manage *all* keys sharing a common prefix, and thus have Terraform
-remove errant keys not present in the configuration, consider using the
-`consul_key_prefix` resource instead.
-
-## Example Usage
-
-```hcl
-resource "consul_keys" "app" {
- datacenter = "nyc1"
- token = "abcd"
-
- # Set the CNAME of our load balancer as a key
- key {
- path = "service/app/elb_address"
- value = "${aws_elb.app.dns_name}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The datacenter to use. This overrides the
- datacenter in the provider setup and the agent's default datacenter.
-
-* `token` - (Optional) The ACL token to use. This overrides the
- token that the agent provides by default.
-
-* `key` - (Required) Specifies a key in Consul to be written.
- Supported values documented below.
-
-The `key` block supports the following:
-
-* `path` - (Required) This is the path in Consul that should be written to.
-
-* `value` - (Required) The value to write to the given path.
-
-* `delete` - (Optional) If true, then the key will be deleted when
- either its configuration block is removed from the configuration or
- the entire resource is destroyed. Otherwise, it will be left in Consul.
- Defaults to false.
-
-### Deprecated `key` arguments
-
-Prior to Terraform 0.7, this resource was used both to read *and* write the
-Consul key/value store. The read functionality has moved to the `consul_keys`
-*data source*, whose documentation can be found via the navigation.
-
-The pre-0.7 interface for reading keys is still supported for backward compatibility,
-but will be removed in a future version of Terraform.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `datacenter` - The datacenter the keys are being written to.
diff --git a/website/source/docs/providers/consul/r/node.html.markdown b/website/source/docs/providers/consul/r/node.html.markdown
deleted file mode 100644
index 20d469d38..000000000
--- a/website/source/docs/providers/consul/r/node.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_node"
-sidebar_current: "docs-consul-resource-node"
-description: |-
- Provides access to Node data in Consul. This can be used to define a node.
----
-
-# consul_node
-
-Provides access to Node data in Consul. This can be used to define a
-node. Currently, defining health checks is not supported.
-
-## Example Usage
-
-```hcl
-resource "consul_node" "foobar" {
- address = "192.168.10.10"
- name = "foobar"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Required) The address of the node being added to,
- or referenced in the catalog.
-
-* `name` - (Required) The name of the node being added to, or
- referenced in the catalog.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `address` - The address of the service.
-* `name` - The name of the service.
diff --git a/website/source/docs/providers/consul/r/prepared_query.markdown b/website/source/docs/providers/consul/r/prepared_query.markdown
deleted file mode 100644
index 1fcaa4833..000000000
--- a/website/source/docs/providers/consul/r/prepared_query.markdown
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_prepared_query"
-sidebar_current: "docs-consul-resource-prepared-query"
-description: |-
- Allows Terraform to manage a Consul prepared query
----
-
-# consul_prepared_query
-
-Allows Terraform to manage a Consul prepared query.
-
-Managing prepared queries is done using Consul's REST API. This resource is
-useful to provide a consistent and declarative way of managing prepared
-queries in your Consul cluster using Terraform.
-
-## Example Usage
-
-```hcl
-# Creates a prepared query myquery.query.consul that finds the nearest
-# healthy myapp.service.consul instance that has the active tag and not
-# the standby tag.
-resource "consul_prepared_query" "myapp-query" {
- name = "myquery"
- datacenter = "us-central1"
- token = "abcd"
- stored_token = "wxyz"
- only_passing = true
- near = "_agent"
-
- service = "myapp"
- tags = ["active", "!standby"]
-
- failover {
- nearest_n = 3
- datacenters = ["us-west1", "us-east-2", "asia-east1"]
- }
-
- dns {
- ttl = "30s"
- }
-}
-
-# Creates a Prepared Query Template that matches *-near-self.query.consul
-# and finds the nearest service that matches the glob character (e.g.
-# foo-near-self.query.consul will find the nearest healthy foo.service.consul).
-resource "consul_prepared_query" "service-near-self" {
- datacenter = "nyc1"
- token = "abcd"
- stored_token = "wxyz"
- name = ""
- only_passing = true
- near = "_agent"
-
- template {
- type = "name_prefix_match"
- regexp = "^(.*)-near-self$"
- }
-
- service = "$${match(1)}"
-
- failover {
- nearest_n = 3
- datacenters = ["dc2", "dc3", "dc4"]
- }
-
- dns {
- ttl = "5m"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `datacenter` - (Optional) The datacenter to use. This overrides the
- datacenter in the provider setup and the agent's default datacenter.
-
-* `token` - (Optional) The ACL token to use when saving the prepared query.
- This overrides the token that the agent provides by default.
-
-* `stored_token` - (Optional) The ACL token to store with the prepared
- query. This token will be used by default whenever the query is executed.
-
-* `name` - (Required) The name of the prepared query. Used to identify
- the prepared query during requests. Can be specified as an empty string
- to configure the query as a catch-all.
-
-* `service` - (Required) The name of the service to query.
-
-* `session` - (Optional) The name of the Consul session to tie this query's
- lifetime to. This is an advanced parameter that should not be used without a
- complete understanding of Consul sessions and the implications of their use
- (it is recommended to leave this blank in nearly all cases). If this
- parameter is omitted the query will not expire.
-
-* `tags` - (Optional) The list of required and/or disallowed tags. If a tag is
- in this list it must be present. If the tag is preceded with a "!" then it is
- disallowed.
-
-* `only_passing` - (Optional) When `true`, the prepared query will only
- return nodes with passing health checks in the result.
-
-* `near` - (Optional) Allows specifying the name of a node to sort results
- near using Consul's distance sorting and network coordinates. The magic
- `_agent` value can be used to always sort nearest the node servicing the
- request.
-
-* `failover` - (Optional) Options for controlling behavior when no healthy
- nodes are available in the local DC.
-
- * `nearest_n` - (Optional) Return results from this many datacenters,
- sorted in ascending order of estimated RTT.
-
- * `datacenters` - (Optional) Remote datacenters to return results from.
-
-* `dns` - (Optional) Settings for controlling the DNS response details.
-
- * `ttl` - (Optional) The TTL to send when returning DNS results.
-
-* `template` - (Optional) Query templating options. This is used to make a
- single prepared query respond to many different requests.
-
- * `type` - (Required) The type of template matching to perform. Currently
- only `name_prefix_match` is supported.
-
- * `regexp` - (Required) The regular expression to match with. When using
- `name_prefix_match`, this regex is applied against the query name.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the prepared query, generated by Consul.
diff --git a/website/source/docs/providers/consul/r/service.html.markdown b/website/source/docs/providers/consul/r/service.html.markdown
deleted file mode 100644
index a61e3e2c3..000000000
--- a/website/source/docs/providers/consul/r/service.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "consul"
-page_title: "Consul: consul_service"
-sidebar_current: "docs-consul-resource-service"
-description: |-
- A high-level resource for creating a Service in Consul. Since Consul requires clients to register services with either the catalog or an agent, `consul_service` may register with either the catalog or an agent, depending on the configuration of `consul_service`. For now, `consul_service` always registers services with the agent running at the address defined in the `consul` resource. Health checks are not currently supported.
----
-
-# consul_service
-
-A high-level resource for creating a Service in Consul. Currently,
-defining health checks for a service is not supported.
-
-## Example Usage
-
-```hcl
-resource "consul_service" "google" {
- address = "www.google.com"
- name = "google"
- port = 80
- tags = ["tag0", "tag1"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `service_id` - (Optional, string) The ID of the service, defaults to the value of `name`
- if not supplied.
-
-* `address` - (Optional, string) The address of the service. Defaults to the
- address of the agent.
-
-* `name` - (Required, string) The name of the service.
-
-* `port` - (Optional, int) The port of the service.
-
-* `tags` - (Optional, set of strings) A list of values that are opaque to Consul,
- but can be used to distinguish between services or nodes.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `service_id` - The id of the service, defaults to the value of `name`.
-* `address` - The address of the service.
-* `name` - The name of the service.
-* `port` - The port of the service.
-* `tags` - The tags of the service.
diff --git a/website/source/docs/providers/datadog/index.html.markdown b/website/source/docs/providers/datadog/index.html.markdown
deleted file mode 100644
index 6dcd52648..000000000
--- a/website/source/docs/providers/datadog/index.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "datadog"
-page_title: "Provider: Datadog"
-sidebar_current: "docs-datadog-index"
-description: |-
- The Datadog provider is used to interact with the resources supported by Datadog. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Datadog Provider
-
-The [Datadog](https://www.datadoghq.com) provider is used to interact with the
-resources supported by Datadog. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Datadog provider
-provider "datadog" {
- api_key = "${var.datadog_api_key}"
- app_key = "${var.datadog_app_key}"
-}
-
-# Create a new monitor
-resource "datadog_monitor" "default" {
- # ...
-}
-
-# Create a new timeboard
-resource "datadog_timeboard" "default" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_key` - (Required) Datadog API key. This can also be set via the `DATADOG_API_KEY` environment variable.
-* `app_key` - (Required) Datadog APP key. This can also be set via the `DATADOG_APP_KEY` environment variable.
diff --git a/website/source/docs/providers/datadog/r/downtime.html.markdown b/website/source/docs/providers/datadog/r/downtime.html.markdown
deleted file mode 100644
index 188b17620..000000000
--- a/website/source/docs/providers/datadog/r/downtime.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "datadog"
-page_title: "Datadog: datadog_downtime"
-sidebar_current: "docs-datadog-resource-downtime"
-description: |-
- Provides a Datadog downtime resource. This can be used to create and manage downtimes.
----
-
-# datadog_downtime
-
-Provides a Datadog downtime resource. This can be used to create and manage Datadog downtimes.
-
-## Example Usage
-
-```hcl
-# Create a new daily 1700-0900 Datadog downtime
-resource "datadog_downtime" "foo" {
- scope = ["*"]
- start = 1483308000
- end = 1483365600
-
- recurrence {
- type = "days"
- period = 1
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `scope` - (Required) A list of items to apply the downtime to, e.g. host:X
-* `start` - (Optional) POSIX timestamp to start the downtime.
-* `end` - (Optional) POSIX timestamp to end the downtime.
-* `recurrence` - (Optional) A dictionary to configure the downtime to be recurring.
- * `type` - days, weeks, months, or years
- * `period` - How often to repeat as an integer. For example to repeat every 3 days, select a type of days and a period of 3.
- * `week_days` - (Optional) A list of week days to repeat on. Choose from: Mon, Tue, Wed, Thu, Fri, Sat or Sun. Only applicable when type is weeks. First letter must be capitalized.
- * `until_occurrences` - (Optional) How many times the downtime will be rescheduled. `until_occurrences` and `until_date` are mutually exclusive.
- * `until_date` - (Optional) The date at which the recurrence should end as a POSIX timestamp. `until_occurrences` and `until_date` are mutually exclusive.
-* `message` - (Optional) A message to include with notifications for this downtime.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the Datadog downtime
-
-## Import
-
-Downtimes can be imported using their numeric ID, e.g.
-
-```
-$ terraform import datadog_downtime.bytes_received_localhost 2081
-```
diff --git a/website/source/docs/providers/datadog/r/monitor.html.markdown b/website/source/docs/providers/datadog/r/monitor.html.markdown
deleted file mode 100644
index ec1ab8b1d..000000000
--- a/website/source/docs/providers/datadog/r/monitor.html.markdown
+++ /dev/null
@@ -1,133 +0,0 @@
----
-layout: "datadog"
-page_title: "Datadog: datadog_monitor"
-sidebar_current: "docs-datadog-resource-monitor"
-description: |-
- Provides a Datadog monitor resource. This can be used to create and manage monitors.
----
-
-# datadog_monitor
-
-Provides a Datadog monitor resource. This can be used to create and manage Datadog monitors.
-
-## Example Usage
-
-```hcl
-# Create a new Datadog monitor
-resource "datadog_monitor" "foo" {
- name = "Name for monitor foo"
- type = "metric alert"
- message = "Monitor triggered. Notify: @hipchat-channel"
- escalation_message = "Escalation message @pagerduty"
-
- query = "avg(last_1h):avg:aws.ec2.cpu{environment:foo,host:foo} by {host} > 2"
-
- thresholds {
- ok = 0
- warning = 1
- critical = 2
- }
-
- notify_no_data = false
- renotify_interval = 60
-
- notify_audit = false
- timeout_h = 60
- include_tags = true
-
- silenced {
- "*" = 0
- }
-
- tags = ["foo:bar", "baz"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The type of the monitor, chosen from:
- * `metric alert`
- * `service check`
- * `event alert`
- * `query alert`
-* `name` - (Required) Name of Datadog monitor
-* `query` - (Required) The monitor query to notify on with syntax varying depending on what type of monitor
- you are creating. See [API Reference](http://docs.datadoghq.com/api) for options.
-* `message` - (Required) A message to include with notifications for this monitor.
- Email notifications can be sent to specific users by using the same '@username' notation as events.
-* `escalation_message` - (Optional) A message to include with a re-notification. Supports the '@username'
- notification allowed elsewhere.
-* `thresholds` - (Optional)
- * Metric alerts:
- A dictionary of thresholds by threshold type. Currently we have two threshold types for metric alerts: critical and warning. Critical is defined in the query, but can also be specified in this option. Warning threshold can only be specified using the thresholds option.
- Example usage:
- ```
- thresholds {
- critical = 90
- warning = 80
- }
- ```
- * Service checks:
- A dictionary of thresholds by status. Because service checks can have multiple thresholds, we don't define them directly in the query.
- Default values:
- ```
- thresholds {
- ok = 1
- critical = 1
- warning = 1
- }
- ```
-
-* `notify_no_data` (Optional) A boolean indicating whether this monitor will notify when data stops reporting. Defaults
- to true.
-* `new_host_delay` (Optional) Time (in seconds) to allow a host to boot and
- applications to fully start before starting the evaluation of monitor
- results. Should be a non negative integer. Defaults to 300.
-* `evaluation_delay` (Optional) Time (in seconds) to delay evaluation, as a non-negative integer.
- For example, if the value is set to 300 (5min), the timeframe is set to last_5m and the time is 7:00,
- the monitor will evaluate data from 6:50 to 6:55. This is useful for AWS CloudWatch and other backfilled
- metrics to ensure the monitor will always have data during evaluation.
-* `no_data_timeframe` (Optional) The number of minutes before a monitor will notify when data stops reporting. Must be at
- least 2x the monitor timeframe for metric alerts or 2 minutes for service checks. Default: 2x timeframe for
- metric alerts, 2 minutes for service checks.
-* `renotify_interval` (Optional) The number of minutes after the last notification before a monitor will re-notify
- on the current status. It will only re-notify if it's not resolved.
-* `notify_audit` (Optional) A boolean indicating whether tagged users will be notified on changes to this monitor.
- Defaults to false.
-* `timeout_h` (Optional) The number of hours of the monitor not reporting data before it will automatically resolve
- from a triggered state. Defaults to false.
-* `include_tags` (Optional) A boolean indicating whether notifications from this monitor will automatically insert its
- triggering tags into the title. Defaults to true.
-* `require_full_window` (Optional) A boolean indicating whether this monitor needs a full window of data before it's evaluated.
- We highly recommend you set this to False for sparse metrics, otherwise some evaluations will be skipped.
- Default: True for "on average", "at all times" and "in total" aggregation. False otherwise.
-* `locked` (Optional) A boolean indicating whether changes to this monitor should be restricted to the creator or admins. Defaults to False.
-* `tags` (Optional) A list of tags to associate with your monitor. This can help you categorize and filter monitors in the manage monitors page of the UI. Note: it's not currently possible to filter by these tags when querying via the API
-* `silenced` (Optional) Each scope will be muted until the given POSIX timestamp or forever if the value is 0.
- To mute the alert completely:
-
- silenced {
- '*' = 0
- }
-
- To mute role:db for a short time:
-
- silenced {
- 'role:db' = 1412798116
- }
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the Datadog monitor
-
-## Import
-
-Monitors can be imported using their numeric ID, e.g.
-
-```
-$ terraform import datadog_monitor.bytes_received_localhost 2081
-```
diff --git a/website/source/docs/providers/datadog/r/timeboard.html.markdown b/website/source/docs/providers/datadog/r/timeboard.html.markdown
deleted file mode 100644
index 7423f79f2..000000000
--- a/website/source/docs/providers/datadog/r/timeboard.html.markdown
+++ /dev/null
@@ -1,141 +0,0 @@
----
-layout: "datadog"
-page_title: "Datadog: datadog_timeboard"
-sidebar_current: "docs-datadog-resource-timeboard"
-description: |-
- Provides a Datadog timeboard resource. This can be used to create and manage timeboards.
----
-
-# datadog_timeboard
-
-Provides a Datadog timeboard resource. This can be used to create and manage Datadog timeboards.
-
-## Example Usage
-
-```hcl
-# Create a new Datadog timeboard
-resource "datadog_timeboard" "redis" {
- title = "Redis Timeboard (created via Terraform)"
- description = "created using the Datadog provider in Terraform"
- read_only = true
-
- graph {
- title = "Redis latency (ms)"
- viz = "timeseries"
-
- request {
- q = "avg:redis.info.latency_ms{$host}"
- type = "bars"
- }
- }
-
- graph {
- title = "Redis memory usage"
- viz = "timeseries"
-
- request {
- q = "avg:redis.mem.used{$host} - avg:redis.mem.lua{$host}, avg:redis.mem.lua{$host}"
- stacked = true
- }
-
- request {
- q = "avg:redis.mem.rss{$host}"
-
- style {
- palette = "warm"
- }
- }
- }
-
- graph {
- title = "Top System CPU by Docker container"
- viz = "toplist"
-
- request {
- q = "top(avg:docker.cpu.system{*} by {container_name}, 10, 'mean', 'desc')"
- }
- }
-
- template_variable {
- name = "host"
- prefix = "host"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `title` - (Required) The name of the dashboard.
-* `description` - (Required) A description of the dashboard's content.
-* `read_only` - (Optional) The read-only status of the timeboard. Default is false.
-* `graph` - (Required) Nested block describing a graph definition. The structure of this block is described below. Multiple graph blocks are allowed within a datadog_timeboard resource.
-* `template_variable` - (Optional) Nested block describing a template variable. The structure of this block is described below. Multiple template_variable blocks are allowed within a datadog_timeboard resource.
-
-### Nested `graph` blocks
-
-Nested `graph` blocks have the following structure:
-
-* `title` - (Required) The name of the graph.
-* `viz` - (Required) The type of visualization to use for the graph. Valid choices are "change", "distribution", "heatmap", "hostmap", "query_value", "timeseries", and "toplist".
-* `request` - Nested block describing a graph definition request (a metric query to plot on the graph). The structure of this block is described below. Multiple request blocks are allowed within a graph block.
-* `events` - (Optional) A list of event filter strings. Note that, while supported by the Datadog API, the Datadog UI does not (currently) support multiple event filters very well, so use at your own risk.
-* `autoscale` - (Optional) Boolean that determines whether to autoscale graphs.
-* `precision` - (Optional) Number of digits displayed, use `*` for full precision.
-* `custom_unit` - (Optional) Display a custom unit on the graph (such as 'hertz')
-* `text_align` - (Optional) How to align text in the graph, can be one of 'left', 'center', or 'right'.
-* `style` - (Optional) Nested block describing hostmaps. The structure of this block is described below.
-* `group` - (Optional) List of groups for hostmaps (shown as 'group by' in the UI).
-* `include_no_metric_hosts` - (Optional) If set to true, will display hosts on hostmap that have no reported metrics.
-* `include_ungrouped_hosts` - (Optional) If set to true, will display hosts without groups on hostmaps.
-* `scope` - (Optional) List of scopes for hostmaps (shown as 'filter by' in the UI).
-* `yaxis` - (Optional) Nested block describing modifications to the yaxis rendering. The structure of this block is described below.
-* `marker` - (Optional) Nested block describing lines / ranges added to graph for formatting. The structure of this block is described below. Multiple marker blocks are allowed within a graph block.
-
-#### Nested `graph` `marker` blocks
-
-Nested `graph` `marker` blocks have the following structure:
-
-* `type` - (Required) How the marker lines will look. Possible values are {"error", "warning", "info", "ok"} {"dashed", "solid", "bold"}. Example: "error dashed".
-* `value` - (Required) Mathematical expression describing the marker. Examples: "y > 1", "-5 < y < 0", "y = 19".
-* `label` - (Optional) A label for the line or range.
-
-{error, warning, info, ok} {dashed, solid, bold}
-
-#### Nested `graph` `yaxis` block
-* `min` - (Optional) Minimum bound for the graph's yaxis, a string.
-* `max` - (Optional) Maximum bound for the graph's yaxis, a string.
-* `scale` - (Optional) How to scale the yaxis. Possible values are: "linear", "log", "sqrt", "pow##" (eg. pow2, pow0.5, 2 is used if only "pow" was provided). Default: "linear".
-
-#### Nested `graph` `request` blocks
-
-Nested `graph` `request` blocks have the following structure:
-
-* `q` - (Required) The query of the request. Pro tip: Use the JSON tab inside the Datadog UI to help build your query strings.
-* `aggregator` - (Optional) The aggregation method used when the number of data points outnumbers the max that can be shown.
-* `stacked` - (Optional) Boolean value to determine if this is a stacked area graph. Default: false (line chart).
-* `type` - (Optional) Choose how to draw the graph. For example: "line", "bar" or "area". Default: "line".
-* `style` - (Optional) Nested block to customize the graph style.
-
-### Nested `graph` `style` block
-The nested `style` block is used specifically for styling `hostmap` graphs, and has the following structure:
-
-* `palette` - (Optional) Spectrum of colors to use when styling a hostmap. For example: "green_to_orange", "yellow_to_green", "YlOrRd", or "hostmap_blues". Default: "green_to_orange".
-* `palette_flip` - (Optional) Flip how the hostmap is rendered. For example, with the default palette, low values are represented as green, with high values as orange. If palette_flip is "true", then low values will be orange, and high values will be green.
-
-### Nested `graph` `request` `style` block
-
-The nested `style` blocks has the following structure:
-
-* `palette` - (Optional) Color of the line drawn. For example: "classic", "cool", "warm", "purple", "orange" or "gray". Default: "classic".
-* `width` - (Optional) Line width. Possible values: "thin", "normal", "thick". Default: "normal".
-* `type` - (Optional) Type of line drawn. Possible values: "dashed", "solid", "dotted". Default: "solid".
-
-### Nested `template_variable` blocks
-
-Nested `template_variable` blocks have the following structure:
-
-* `name` - (Required) The variable name. Can be referenced as $name in `graph` `request` `q` query strings.
-* `prefix` - (Optional) The tag group. Default: no tag group.
-* `default` - (Required) The default tag. Default: "*" (match all).
diff --git a/website/source/docs/providers/datadog/r/user.html.markdown b/website/source/docs/providers/datadog/r/user.html.markdown
deleted file mode 100644
index d119ad8a5..000000000
--- a/website/source/docs/providers/datadog/r/user.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "datadog"
-page_title: "Datadog: datadog_user"
-sidebar_current: "docs-datadog-resource-user"
-description: |-
- Provides a Datadog user resource. This can be used to create and manage users.
----
-
-# datadog_user
-
-Provides a Datadog user resource. This can be used to create and manage Datadog users.
-
-## Example Usage
-
-```hcl
-# Create a new Datadog user
-resource "datadog_user" "foo" {
- email = "new@example.com"
- handle = "new@example.com"
- name = "New User"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `disabled` - (Optional) Whether the user is disabled
-* `email` - (Required) Email address for user
-* `handle` - (Required) The user handle, must be a valid email.
-* `is_admin` - (Optional) Whether the user is an administrator
-* `name` - (Required) Name for user
-* `role` - (Optional) Role description for user (NOTE: can only be applied on user creation)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `disabled` - Returns true if Datadog user is disabled (NOTE: Datadog does not actually delete users so this will be true for those as well)
-* `id` - ID of the Datadog user
-* `verified` - Returns true if Datadog user is verified
-
-## Import
-
-Users can be imported using their handle, e.g.
-
-```
-$ terraform import datadog_user.example_user existing@example.com
-```
diff --git a/website/source/docs/providers/dme/index.html.markdown b/website/source/docs/providers/dme/index.html.markdown
deleted file mode 100644
index 5dfbba4dd..000000000
--- a/website/source/docs/providers/dme/index.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "dme"
-page_title: "Provider: DNSMadeEasy"
-sidebar_current: "docs-dme-index"
-description: |-
- The DNSMadeEasy provider is used to interact with the resources supported by DNSMadeEasy. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# DNSMadeEasy Provider
-
-The DNSMadeEasy provider is used to interact with the
-resources supported by DNSMadeEasy. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the DNSMadeEasy provider
-provider "dme" {
- akey = "${var.dme_akey}"
- skey = "${var.dme_skey}"
- usesandbox = true
-}
-
-# Create an A record
-resource "dme_record" "www" {
- domainid = "123456"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `akey` - (Required) The DNSMadeEasy API key. This can also be specified with
- the `DME_AKEY` shell environment variable.
-* `skey` - (Required) The DNSMadeEasy Secret key. This can also be specified
- with the `DME_SKEY` shell environment variable.
-* `usesandbox` - (Optional) If true, the DNSMadeEasy sandbox will be
- used. This can also be specified with the `DME_USESANDBOX` shell environment
- variable.
diff --git a/website/source/docs/providers/dme/r/record.html.markdown b/website/source/docs/providers/dme/r/record.html.markdown
deleted file mode 100644
index ed6309162..000000000
--- a/website/source/docs/providers/dme/r/record.html.markdown
+++ /dev/null
@@ -1,249 +0,0 @@
----
-layout: "dme"
-page_title: "DNSMadeEasy: dme_record"
-sidebar_current: "docs-dme-resource-record"
-description: |-
- Provides a DNSMadeEasy record resource.
----
-
-# dme_record
-
-Provides a DNSMadeEasy record resource.
-
-## Example Usage
-
-```hcl
-# Add an A record to the domain
-resource "dme_record" "www" {
- domainid = "123456"
- name = "www"
- type = "A"
- value = "192.168.1.1"
- ttl = 3600
- gtdLocation = "DEFAULT"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domainid` - (String, Required) The domain id to add the
- record to
-* `name` - (Required) The name of the record
-* `type` - (Required) The type of the record
-* `value` - (Required) The value of the record; its usage
-  will depend on the `type` (see below)
-* `ttl` - (Integer, Optional) The TTL of the record
-* `gtdLocation` - (String, Optional) The GTD Location of the record on Global
-  Traffic Director enabled domains; unless GTD is enabled this should either
-  be omitted or set to "DEFAULT"
-
-Additional arguments are listed below under DNS Record Types.
-
-## DNS Record Types
-
-The type of record being created affects the interpretation of
-the `value` argument; also, some additional arguments are
-required for some record types.
-http://help.dnsmadeeasy.com/tutorials/managed-dns/ has more
-information.
-
-#### A Record
-
-* `value` is the hostname
-
-#### CNAME Record
-
-* `value` is the alias name
-
-#### ANAME Record
-
-* `value` is the aname target
-
-#### MX Record
-
-* `value` is the server
-* `mxLevel` (Integer, Required) is the MX level
-
-#### HTTPRED Record
-
-* `value` is the URL
-* `hardLink` (Boolean, Optional) If true, any request that is
- made for this record will have the path removed after the
- fully qualified domain name portion of the requested URL
-* `redirectType` (Required) One of 'Hidden Frame Masked',
- 'Standard 301', or 'Standard 302'
-* `title` (Optional) If set, the hidden iframe that is
- used in conjunction with the Hidden Frame Masked Redirect
- Type will have the HTML meta description data field set to
- the value of this field
-* `keywords` (Optional) If set, the hidden iframe that is used
- in conjunction with the Hidden Frame Masked Redirect Type
- will have the HTML meta keywords data field set to the value
- of this field
-* `description` (Optional) A human-readable description.
-
-#### TXT Record
-
-* `value` is free form text
-
-#### SPF Record
-
-* `value` is the SPF definition of hosts allowed to send email
-
-#### PTR Record
-
-* `value` is the reverse DNS for the host
-
-#### NS Record
-
-* `value` is the host name of the server
-
-#### AAAA Record
-
-* `value` is the IPv6 address
-
-#### SRV Record
-
-* `value` is the host
-* `priority` (Integer, Required). Acts the same way as MX Level
-* `weight` (Integer, Required). Hits will be assigned proportionately
- by weight
-* `port` (Integer, Required). The actual port of the service offered
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the record
-* `type` - The type of the record
-* `value` - The value of the record; its interpretation depends on the
-  record's `type` (see below)
-* `ttl` - The TTL of the record
-* `gtdLocation` - The GTD Location of the record on GTD enabled domains
-
-Additional fields may also be exported by some record types -
-see DNS Record Types.
-
-#### Record Type Examples
-
-Following are examples of using each of the record types.
-
-```hcl
-# Provide your API and Secret Keys, and whether the sandbox
-# is being used (defaults to false)
-provider "dme" {
- akey = "aaaaaa1a-11a1-1aa1-a101-11a1a11aa1aa"
- skey = "11a0a11a-a1a1-111a-a11a-a11110a11111"
- usesandbox = true
-}
-
-# A Record
-resource "dme_record" "testa" {
- domainid = "123456"
- name = "testa"
- type = "A"
- value = "1.1.1.1"
- ttl = 1000
- gtdLocation = "DEFAULT"
-}
-
-# CNAME record
-resource "dme_record" "testcname" {
- domainid = "123456"
- name = "testcname"
- type = "CNAME"
- value = "foo"
- ttl = 1000
-}
-
-# ANAME record
-resource "dme_record" "testaname" {
- domainid = "123456"
- name = "testaname"
- type = "ANAME"
- value = "foo"
- ttl = 1000
-}
-
-# MX record
-resource "dme_record" "testmx" {
- domainid = "123456"
- name = "testmx"
- type = "MX"
- value = "foo"
- mxLevel = 10
- ttl = 1000
-}
-
-# HTTPRED
-resource "dme_record" "testhttpred" {
- domainid = "123456"
- name = "testhttpred"
- type = "HTTPRED"
- value = "https://github.com/soniah/terraform-provider-dme"
- hardLink = true
- redirectType = "Hidden Frame Masked"
- title = "An Example"
- keywords = "terraform example"
- description = "This is a description"
- ttl = 2000
-}
-
-# TXT record
-resource "dme_record" "testtxt" {
- domainid = "123456"
- name = "testtxt"
- type = "TXT"
- value = "foo"
- ttl = 1000
-}
-
-# SPF record
-resource "dme_record" "testspf" {
- domainid = "123456"
- name = "testspf"
- type = "SPF"
- value = "foo"
- ttl = 1000
-}
-
-# PTR record
-resource "dme_record" "testptr" {
- domainid = "123456"
- name = "testptr"
- type = "PTR"
- value = "foo"
- ttl = 1000
-}
-
-# NS record
-resource "dme_record" "testns" {
- domainid = "123456"
- name = "testns"
- type = "NS"
- value = "foo"
- ttl = 1000
-}
-
-# AAAA record
-resource "dme_record" "testaaaa" {
- domainid = "123456"
- name = "testaaaa"
- type = "AAAA"
- value = "FE80::0202:B3FF:FE1E:8329"
- ttl = 1000
-}
-
-# SRV record
-resource "dme_record" "testsrv" {
- domainid = "123456"
- name = "testsrv"
- type = "SRV"
- value = "foo"
- priority = 10
- weight = 20
- port = 30
- ttl = 1000
-}
-```
diff --git a/website/source/docs/providers/dns/d/dns_a_record_set.html.markdown b/website/source/docs/providers/dns/d/dns_a_record_set.html.markdown
deleted file mode 100644
index 1ec48117d..000000000
--- a/website/source/docs/providers/dns/d/dns_a_record_set.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_a_record_set"
-sidebar_current: "docs-dns-datasource-a-record-set"
-description: |-
- Get DNS A record set.
----
-
-# dns_a_record_set
-
-Use this data source to get DNS A records of the host.
-
-## Example Usage
-
-```hcl
-data "dns_a_record_set" "google" {
- host = "google.com"
-}
-
-output "google_addrs" {
- value = "${join(",", data.dns_a_record_set.google.addrs)}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `host` - (required): Host to look up
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - Set to `host`.
-
- * `addrs` - A list of IP addresses. IP addresses are always sorted to avoid constant changing plans.
diff --git a/website/source/docs/providers/dns/d/dns_cname_record_set.html.markdown b/website/source/docs/providers/dns/d/dns_cname_record_set.html.markdown
deleted file mode 100644
index 6d7ad48e2..000000000
--- a/website/source/docs/providers/dns/d/dns_cname_record_set.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_cname_record_set"
-sidebar_current: "docs-dns-datasource-cname-record-set"
-description: |-
- Get DNS CNAME record set.
----
-
-# dns_cname_record_set
-
-Use this data source to get DNS CNAME record set of the host.
-
-## Example Usage
-
-```hcl
-data "dns_cname_record_set" "hashicorp" {
- host = "www.hashicorp.com"
-}
-
-output "hashi_cname" {
-  value = "${data.dns_cname_record_set.hashicorp.cname}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `host` - (required): Host to look up
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - Set to `host`.
-
- * `cname` - A CNAME record associated with host.
diff --git a/website/source/docs/providers/dns/d/dns_txt_record_set.html.markdown b/website/source/docs/providers/dns/d/dns_txt_record_set.html.markdown
deleted file mode 100644
index bb18dc915..000000000
--- a/website/source/docs/providers/dns/d/dns_txt_record_set.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_txt_record_set"
-sidebar_current: "docs-dns-datasource-txt-record-set"
-description: |-
- Get DNS TXT record set.
----
-
-# dns_txt_record_set
-
-Use this data source to get DNS TXT record set of the host.
-
-## Example Usage
-
-```hcl
-data "dns_txt_record_set" "hashicorp" {
- host = "www.hashicorp.com"
-}
-
-output "hashi_txt" {
-  value = "${data.dns_txt_record_set.hashicorp.record}"
-}
-
-output "hashi_txts" {
-  value = "${join(",", data.dns_txt_record_set.hashicorp.records)}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `host` - (required): Host to look up
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - Set to `host`.
-
- * `record` - The first TXT record.
-
- * `records` - A list of TXT records.
diff --git a/website/source/docs/providers/dns/index.html.markdown b/website/source/docs/providers/dns/index.html.markdown
deleted file mode 100644
index d24a65adf..000000000
--- a/website/source/docs/providers/dns/index.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "dns"
-page_title: "Provider: DNS"
-sidebar_current: "docs-dns-index"
-description: |-
- The DNS provider supports DNS updates (RFC 2136). Additionally, the provider can be configured with secret key based transaction authentication (RFC 2845).
----
-
-# DNS Provider
-
-The DNS provider supports DNS updates (RFC 2136). Additionally, the provider can be configured with secret key based transaction authentication (RFC 2845).
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the DNS Provider
-provider "dns" {
- update {
- server = "192.168.0.1"
- key_name = "example.com."
- key_algorithm = "hmac-md5"
- key_secret = "3VwZXJzZWNyZXQ="
- }
-}
-
-# Create a DNS A record set
-resource "dns_a_record_set" "www" {
- # ...
-}
-```
-
-## Configuration Reference
-
-`update` - (Optional) When the provider is used for DNS updates, this block is required. Structure is documented below.
-
-The `update` block supports the following attributes:
-
-* `server` - (Required) The IPv4 address of the DNS server to send updates to.
-* `port` - (Optional) The target UDP port on the server where updates are sent to. Defaults to `53`.
-* `key_name` - (Optional) The name of the TSIG key used to sign the DNS update messages.
-* `key_algorithm` - (Optional; Required if `key_name` is set) When using TSIG authentication, the algorithm to use for HMAC. Valid values are `hmac-md5`, `hmac-sha1`, `hmac-sha256` or `hmac-sha512`.
-* `key_secret` - (Optional; Required if `key_name` is set)
- A Base64-encoded string containing the shared secret to be used for TSIG.
diff --git a/website/source/docs/providers/dns/r/dns_a_record_set.html.markdown b/website/source/docs/providers/dns/r/dns_a_record_set.html.markdown
deleted file mode 100644
index 72b726fe4..000000000
--- a/website/source/docs/providers/dns/r/dns_a_record_set.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_a_record_set"
-sidebar_current: "docs-dns-a-record-set"
-description: |-
- Creates a A type DNS record set.
----
-
-# dns_a_record_set
-
-Creates a A type DNS record set.
-
-## Example Usage
-
-```hcl
-resource "dns_a_record_set" "www" {
- zone = "example.com."
- name = "www"
- addresses = [
- "192.168.0.1",
- "192.168.0.2",
- "192.168.0.3",
- ]
- ttl = 300
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) DNS zone the record set belongs to. It must be an FQDN, that is, include the trailing dot.
-* `name` - (Required) The name of the record set. The `zone` argument will be appended to this value to create the full record path.
-* `addresses` - (Required) The IPv4 addresses this record set will point to.
-* `ttl` - (Optional) The TTL of the record set. Defaults to `3600`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `zone` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `addresses` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
diff --git a/website/source/docs/providers/dns/r/dns_aaaa_record_set.html.markdown b/website/source/docs/providers/dns/r/dns_aaaa_record_set.html.markdown
deleted file mode 100644
index 7cbf89599..000000000
--- a/website/source/docs/providers/dns/r/dns_aaaa_record_set.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_aaaa_record_set"
-sidebar_current: "docs-dns-aaaa-record-set"
-description: |-
- Creates a AAAA type DNS record set.
----
-
-# dns_aaaa_record_set
-
-Creates a AAAA type DNS record set.
-
-## Example Usage
-
-```hcl
-resource "dns_aaaa_record_set" "www" {
- zone = "example.com."
- name = "www"
- addresses = [
- "fdd5:e282:43b8:5303:dead:beef:cafe:babe",
- "fdd5:e282:43b8:5303:cafe:babe:dead:beef",
- ]
- ttl = 300
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) DNS zone the record set belongs to. It must be an FQDN, that is, include the trailing dot.
-* `name` - (Required) The name of the record set. The `zone` argument will be appended to this value to create the full record path.
-* `addresses` - (Required) The IPv6 addresses this record set will point to.
-* `ttl` - (Optional) The TTL of the record set. Defaults to `3600`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `zone` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `addresses` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
diff --git a/website/source/docs/providers/dns/r/dns_cname_record.html.markdown b/website/source/docs/providers/dns/r/dns_cname_record.html.markdown
deleted file mode 100644
index 914716850..000000000
--- a/website/source/docs/providers/dns/r/dns_cname_record.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_cname_record"
-sidebar_current: "docs-dns-cname-record"
-description: |-
- Creates a CNAME type DNS record.
----
-
-# dns_cname_record
-
-Creates a CNAME type DNS record.
-
-## Example Usage
-
-```hcl
-resource "dns_cname_record" "foo" {
- zone = "example.com."
- name = "foo"
- cname = "bar.example.com."
- ttl = 300
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) DNS zone the record belongs to. It must be an FQDN, that is, include the trailing dot.
-* `name` - (Required) The name of the record. The `zone` argument will be appended to this value to create the full record path.
-* `cname` - (Required) The canonical name this record will point to.
-* `ttl` - (Optional) The TTL of the record set. Defaults to `3600`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `zone` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `cname` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
diff --git a/website/source/docs/providers/dns/r/dns_ptr_record.html.markdown b/website/source/docs/providers/dns/r/dns_ptr_record.html.markdown
deleted file mode 100644
index 14c0e31ca..000000000
--- a/website/source/docs/providers/dns/r/dns_ptr_record.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "dns"
-page_title: "DNS: dns_ptr_record"
-sidebar_current: "docs-dns-ptr-record"
-description: |-
- Creates a PTR type DNS record.
----
-
-# dns_ptr_record
-
-Creates a PTR type DNS record.
-
-## Example Usage
-
-```hcl
-resource "dns_ptr_record" "dns-sd" {
- zone = "example.com."
- name = "r._dns-sd"
- ptr = "example.com."
- ttl = 300
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) DNS zone the record belongs to. It must be an FQDN, that is, include the trailing dot.
-* `name` - (Required) The name of the record. The `zone` argument will be appended to this value to create the full record path.
-* `ptr` - (Required) The canonical name this record will point to.
-* `ttl` - (Optional) The TTL of the record set. Defaults to `3600`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `zone` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `ptr` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
diff --git a/website/source/docs/providers/dnsimple/index.html.markdown b/website/source/docs/providers/dnsimple/index.html.markdown
deleted file mode 100644
index 26d050780..000000000
--- a/website/source/docs/providers/dnsimple/index.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "dnsimple"
-page_title: "Provider: DNSimple"
-sidebar_current: "docs-dnsimple-index"
-description: |-
- The DNSimple provider is used to interact with the resources supported by DNSimple. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# DNSimple Provider
-
-The DNSimple provider is used to interact with the
-resources supported by DNSimple. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-
-## Example Usage
-
-```hcl
-# Configure the DNSimple provider
-provider "dnsimple" {
- token = "${var.dnsimple_token}"
- account = "${var.dnsimple_account}"
-}
-
-# Create a record
-resource "dnsimple_record" "www" {
- # ...
-}
-```
-
-
-## API v2 vs API v1
-
-This integration uses the new DNSimple API v2 [released on December 2016](https://blog.dnsimple.com/2016/12/api-v2-stable/). The API v2 provides support for multi-accounts and requires a new authentication mechanism.
-
-If you are upgrading from a previous Terraform version and you were using the API v1, you will need to upgrade the DNSimple provider configuration to use the new API access token and specify the Account ID. Terraform will automatically detect an existing legacy configurations and it will return an error message asking to upgrade.
-
-API v1 is no longer supported. If you are using the `DNSIMPLE_EMAIL` argument, you can safely remove it once you have upgraded to API v2. To use API v1 you will need to use a Terraform version lower than 0.9.
-
-To upgrade from the DNSimple provider API v1 to DNSimple provider API v2 follow these steps:
-
-1. [Generate an API v2 access token](https://support.dnsimple.com/articles/api-access-token/)
-1. [Determine the Account ID](https://developer.dnsimple.com/v2/#account-scope)
-1. Add the `account` configuration and update the `token`, as shown in the example above
-1. Remove the `email` configuration, as it's no longer used
-
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `token` - (Required) The DNSimple API v2 token. It must be provided, but it can also be sourced from the `DNSIMPLE_TOKEN` environment variable. Please note that this must be an [API v2 token](https://support.dnsimple.com/articles/api-access-token/). You can use either an User or Account token, but an Account token is recommended.
-* `account` - (Required) The ID of the account associated with the token. It must be provided, but it can also be sourced from the `DNSIMPLE_ACCOUNT` environment variable.
diff --git a/website/source/docs/providers/dnsimple/r/record.html.markdown b/website/source/docs/providers/dnsimple/r/record.html.markdown
deleted file mode 100644
index f4bb50053..000000000
--- a/website/source/docs/providers/dnsimple/r/record.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "dnsimple"
-page_title: "DNSimple: dnsimple_record"
-sidebar_current: "docs-dnsimple-resource-record"
-description: |-
- Provides a DNSimple record resource.
----
-
-# dnsimple\_record
-
-Provides a DNSimple record resource.
-
-## Example Usage
-
-```hcl
-# Add a record to the root domain
-resource "dnsimple_record" "foobar" {
- domain = "${var.dnsimple_domain}"
- name = ""
- value = "192.168.0.11"
- type = "A"
- ttl = 3600
-}
-```
-
-```hcl
-# Add a record to a sub-domain
-resource "dnsimple_record" "foobar" {
- domain = "${var.dnsimple_domain}"
- name = "terraform"
- value = "192.168.0.11"
- type = "A"
- ttl = 3600
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `domain` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-* `value` - (Required) The value of the record
-* `type` - (Required) The type of the record
-* `ttl` - (Optional) The TTL of the record
-* `priority` - (Optional) The priority of the record - only useful for some record types
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `name` - The name of the record
-* `value` - The value of the record
-* `type` - The type of the record
-* `ttl` - The TTL of the record
-* `priority` - The priority of the record
-* `domain_id` - The domain ID of the record
-* `hostname` - The FQDN of the record
-
-## Import
-
-DNSimple resources can be imported using their domain name and numeric ID, e.g.
-
-```
-$ terraform import dnsimple_record.resource_name example.com_1234
-```
-
-The numeric ID can be found in the URL when editing a record on the dnsimple web dashboard.
diff --git a/website/source/docs/providers/do/d/image.html.md b/website/source/docs/providers/do/d/image.html.md
deleted file mode 100644
index ffed23f25..000000000
--- a/website/source/docs/providers/do/d/image.html.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_image"
-sidebar_current: "docs-do-datasource-image"
-description: |-
- Get information on an snapshot.
----
-
-# digitalocean_image
-
-Get information on an snapshot images. The aim of this datasource is to enable
-you to build droplets based on snapshot names.
-
-An error is triggered if zero or more than one result is returned by the query.
-
-## Example Usage
-
-Get the data about a snapshot:
-
-```hcl
-data "digitalocean_image" "example1" {
- name = "example-1.0.0"
-}
-```
-
-Reuse the data about a snapshot to create a droplet:
-
-```hcl
-data "digitalocean_image" "example1" {
- name = "example-1.0.0"
-}
-resource "digitalocean_droplet" "example1" {
- image = "${data.digitalocean_image.example1.image}"
- name = "example-1"
- region = "nyc2"
- size = "512mb"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - The name of the image.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - See Argument Reference above.
-* `image` - The id of the image.
-* `min_disk_size`: The minimum 'disk' required for the image.
-* `private` - Is image a public image or not. Public images represents
- Linux distributions or Application, while non-public images represent
- snapshots and backups and are only available within your account.
-* `regions`: The regions that the image is available in.
-* `size_gigabytes`: The size of the image in gigabytes.
-* `type`: Type of the image. Can be "snapshot" or "backup".
diff --git a/website/source/docs/providers/do/index.html.markdown b/website/source/docs/providers/do/index.html.markdown
deleted file mode 100644
index 1c7571f9f..000000000
--- a/website/source/docs/providers/do/index.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "digitalocean"
-page_title: "Provider: DigitalOcean"
-sidebar_current: "docs-do-index"
-description: |-
- The DigitalOcean (DO) provider is used to interact with the resources supported by DigitalOcean. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# DigitalOcean Provider
-
-The DigitalOcean (DO) provider is used to interact with the
-resources supported by DigitalOcean. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Set the variable value in *.tfvars file
-# or using -var="do_token=..." CLI option
-variable "do_token" {}
-
-# Configure the DigitalOcean Provider
-provider "digitalocean" {
- token = "${var.do_token}"
-}
-
-# Create a web server
-resource "digitalocean_droplet" "web" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `token` - (Required) This is the DO API token. This can also be specified
- with the `DIGITALOCEAN_TOKEN` shell environment variable.
-
diff --git a/website/source/docs/providers/do/r/certificate.html.markdown b/website/source/docs/providers/do/r/certificate.html.markdown
deleted file mode 100644
index a1d8ba0d2..000000000
--- a/website/source/docs/providers/do/r/certificate.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_certificate"
-sidebar_current: "docs-do-resource-certificate"
-description: |-
- Provides a DigitalOcean Certificate resource.
----
-
-# digitalocean\_certificate
-
-Provides a DigitalOcean Certificate resource that allows you to manage
-certificates for configuring TLS termination in Load Balancers.
-Certificates created with this resource can be referenced in your
-Load Balancer configuration via their ID.
-
-## Example Usage
-
-```hcl
-# Create a new TLS certificate
-resource "digitalocean_certificate" "cert" {
- name = "Terraform Example"
- private_key = "${file("/Users/terraform/certs/privkey.pem")}"
- leaf_certificate = "${file("/Users/terraform/certs/cert.pem")}"
- certificate_chain = "${file("/Users/terraform/certs/fullchain.pem")}"
-}
-
-# Create a new Load Balancer with TLS termination
-resource "digitalocean_loadbalancer" "public" {
- name = "secure-loadbalancer-1"
- region = "nyc3"
- droplet_tag = "backend"
-
- forwarding_rule {
- entry_port = 443
- entry_protocol = "https"
-
- target_port = 80
- target_protocol = "http"
-
- certificate_id = "${digitalocean_certificate.cert.id}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the certificate for identification.
-* `private_key` - (Required) The contents of a PEM-formatted private-key
-corresponding to the SSL certificate.
-* `leaf_certificate` - (Required) The contents of a PEM-formatted public
-TLS certificate.
-* `certificate_chain` - (Optional) The full PEM-formatted trust chain
-between the certificate authority's certificate and your domain's TLS
-certificate.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID of the certificate
-* `name` - The name of the certificate
-* `not_after` - The expiration date of the certificate
-* `sha1_fingerprint` - The SHA-1 fingerprint of the certificate
diff --git a/website/source/docs/providers/do/r/domain.html.markdown b/website/source/docs/providers/do/r/domain.html.markdown
deleted file mode 100644
index c90ae150e..000000000
--- a/website/source/docs/providers/do/r/domain.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_domain"
-sidebar_current: "docs-do-resource-domain"
-description: |-
- Provides a DigitalOcean domain resource.
----
-
-# digitalocean\_domain
-
-Provides a DigitalOcean domain resource.
-
-## Example Usage
-
-```hcl
-# Create a new domain
-resource "digitalocean_domain" "default" {
- name = "www.example.com"
- ip_address = "${digitalocean_droplet.foo.ipv4_address}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the domain
-* `ip_address` - (Required) The IP address of the domain. This IP
- is used to created an initial A record for the domain. It is required
- upstream by the DigitalOcean API.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The name of the domain
-
-
-
-## Import
-
-Domains can be imported using the `domain name`, e.g.
-
-```
-terraform import digitalocean_domain.mydomain mytestdomain.com
-```
diff --git a/website/source/docs/providers/do/r/droplet.html.markdown b/website/source/docs/providers/do/r/droplet.html.markdown
deleted file mode 100644
index 3b15b2e4e..000000000
--- a/website/source/docs/providers/do/r/droplet.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_droplet"
-sidebar_current: "docs-do-resource-droplet"
-description: |-
- Provides a DigitalOcean Droplet resource. This can be used to create, modify, and delete Droplets. Droplets also support provisioning.
----
-
-# digitalocean\_droplet
-
-Provides a DigitalOcean Droplet resource. This can be used to create,
-modify, and delete Droplets. Droplets also support
-[provisioning](/docs/provisioners/index.html).
-
-## Example Usage
-
-```hcl
-# Create a new Web Droplet in the nyc2 region
-resource "digitalocean_droplet" "web" {
- image = "ubuntu-14-04-x64"
- name = "web-1"
- region = "nyc2"
- size = "512mb"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `image` - (Required) The Droplet image ID or slug.
-* `name` - (Required) The Droplet name
-* `region` - (Required) The region to start in
-* `size` - (Required) The instance size to start
-* `backups` - (Optional) Boolean controlling if backups are made. Defaults to
- false.
-* `ipv6` - (Optional) Boolean controlling if IPv6 is enabled. Defaults to false.
-* `private_networking` - (Optional) Boolean controlling if private networks are
- enabled. Defaults to false.
-* `ssh_keys` - (Optional) A list of SSH IDs or fingerprints to enable in
- the format `[12345, 123456]`. To retrieve this info, use a tool such
- as `curl` with the [DigitalOcean API](https://developers.digitalocean.com/#keys),
- to retrieve them.
-* `resize_disk` - (Optional) Boolean controlling whether to increase the disk
- size when resizing a Droplet. It defaults to `true`. When set to `false`,
- only the Droplet's RAM and CPU will be resized. **Increasing a Droplet's disk
- size is a permanent change**. Increasing only RAM and CPU is reversible.
-* `tags` - (Optional) A list of the tags to label this droplet. A tag resource
- must exist before it can be associated with a droplet.
-* `user_data` (Optional) - A string of the desired User Data for the Droplet.
-* `volume_ids` (Optional) - A list of the IDs of each [block storage volume](/docs/providers/do/r/volume.html) to be attached to the Droplet.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Droplet
-* `name`- The name of the Droplet
-* `region` - The region of the Droplet
-* `image` - The image of the Droplet
-* `ipv6` - Is IPv6 enabled
-* `ipv6_address` - The IPv6 address
-* `ipv6_address_private` - The private networking IPv6 address
-* `ipv4_address` - The IPv4 address
-* `ipv4_address_private` - The private networking IPv4 address
-* `locked` - Is the Droplet locked
-* `private_networking` - Is private networking enabled
-* `price_hourly` - Droplet hourly price
-* `price_monthly` - Droplet monthly price
-* `size` - The instance size
-* `disk` - The size of the instance's disk in GB
-* `vcpus` - The number of the instance's virtual CPUs
-* `status` - The status of the droplet
-* `tags` - The tags associated with the droplet
-* `volume_ids` - A list of the attached block storage volumes
-
-## Import
-
-Droplets can be imported using the droplet `id`, e.g.
-
-```
-terraform import digitalocean_droplet.mydroplet 100823
-```
diff --git a/website/source/docs/providers/do/r/floating_ip.html.markdown b/website/source/docs/providers/do/r/floating_ip.html.markdown
deleted file mode 100644
index 63196b09d..000000000
--- a/website/source/docs/providers/do/r/floating_ip.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_floating_ip"
-sidebar_current: "docs-do-resource-floating-ip"
-description: |-
- Provides a DigitalOcean Floating IP resource.
----
-
-# digitalocean\_floating_ip
-
-Provides a DigitalOcean Floating IP to represent a publicly-accessible static IP addresses that can be mapped to one of your Droplets.
-
-## Example Usage
-
-```hcl
-resource "digitalocean_droplet" "foobar" {
- name = "baz"
- size = "1gb"
- image = "centos-5-8-x32"
- region = "sgp1"
- ipv6 = true
- private_networking = true
-}
-
-resource "digitalocean_floating_ip" "foobar" {
- droplet_id = "${digitalocean_droplet.foobar.id}"
- region = "${digitalocean_droplet.foobar.region}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region that the Floating IP is reserved to.
-* `droplet_id` - (Optional) The ID of Droplet that the Floating IP will be assigned to.
-
-~> **NOTE:** A Floating IP can be assigned to a region OR a droplet_id. If both region AND droplet_id are specified, then the Floating IP will be assigned to the droplet and use that region
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `ip_address` - The IP Address of the resource
-
-## Import
-
-Floating IPs can be imported using the `ip`, e.g.
-
-```
-terraform import digitalocean_floating_ip.myip 192.168.0.1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/do/r/loadbalancer.html.markdown b/website/source/docs/providers/do/r/loadbalancer.html.markdown
deleted file mode 100644
index ec51ebcad..000000000
--- a/website/source/docs/providers/do/r/loadbalancer.html.markdown
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_loadbalancer"
-sidebar_current: "docs-do-resource-loadbalancer"
-description: |-
- Provides a DigitalOcean Load Balancer resource. This can be used to create, modify, and delete Load Balancers.
----
-
-# digitalocean\_loadbalancer
-
-Provides a DigitalOcean Load Balancer resource. This can be used to create,
-modify, and delete Load Balancers.
-
-## Example Usage
-
-```hcl
-resource "digitalocean_droplet" "web" {
- name = "web-1"
- size = "512mb"
- image = "centos-7-x64"
- region = "nyc3"
-}
-
-resource "digitalocean_loadbalancer" "public" {
- name = "loadbalancer-1"
- region = "nyc3"
-
- forwarding_rule {
- entry_port = 80
- entry_protocol = "http"
-
- target_port = 80
- target_protocol = "http"
- }
-
- healthcheck {
- port = 22
- protocol = "tcp"
- }
-
- droplet_ids = ["${digitalocean_droplet.web.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The Load Balancer name
-* `region` - (Required) The region to start in
-* `algorithm` - (Optional) The load balancing algorithm used to determine
-which backend Droplet will be selected by a client. It must be either `round_robin`
-or `least_connections`. The default value is `round_robin`.
-* `forwarding_rule` - (Required) A list of `forwarding_rule` to be assigned to the
-Load Balancer. The `forwarding_rule` block is documented below.
-* `healthcheck` - (Optional) A `healthcheck` block to be assigned to the
-Load Balancer. The `healthcheck` block is documented below. Only 1 healthcheck is allowed.
-* `sticky_sessions` - (Optional) A `sticky_sessions` block to be assigned to the
-Load Balancer. The `sticky_sessions` block is documented below. Only 1 sticky_sessions block is allowed.
-* `redirect_http_to_https` - (Optional) A boolean value indicating whether
-HTTP requests to the Load Balancer on port 80 will be redirected to HTTPS on port 443.
-Default value is `false`.
-* `droplet_ids` (Optional) - A list of the IDs of each droplet to be attached to the Load Balancer.
-* `droplet_tag` (Optional) - The name of a Droplet tag corresponding to Droplets to be assigned to the Load Balancer.
-
-`forwarding_rule` supports the following:
-
-* `entry_protocol` - (Required) The protocol used for traffic to the Load Balancer. The possible values are: `http`, `https`, or `tcp`.
-* `entry_port` - (Required) An integer representing the port on which the Load Balancer instance will listen.
-* `target_protocol` - (Required) The protocol used for traffic from the Load Balancer to the backend Droplets. The possible values are: `http`, `https`, or `tcp`.
-* `target_port` - (Required) An integer representing the port on the backend Droplets to which the Load Balancer will send traffic.
-* `certificate_id` - (Optional) The ID of the TLS certificate to be used for SSL termination.
-* `tls_passthrough` - (Optional) A boolean value indicating whether SSL encrypted traffic will be passed through to the backend Droplets. The default value is `false`.
-
-`sticky_sessions` supports the following:
-
-* `type` - (Required) An attribute indicating how and if requests from a client will be persistently served by the same backend Droplet. The possible values are `cookies` or `none`. If not specified, the default value is `none`.
-* `cookie_name` - (Optional) The name to be used for the cookie sent to the client. This attribute is required when using `cookies` for the sticky sessions type.
-* `cookie_ttl_seconds` - (Optional) The number of seconds until the cookie set by the Load Balancer expires. This attribute is required when using `cookies` for the sticky sessions type.
-
-
-`healthcheck` supports the following:
-
-* `protocol` - (Required) The protocol used for health checks sent to the backend Droplets. The possible values are `http` or `tcp`.
-* `port` - (Optional) An integer representing the port on the backend Droplets on which the health check will attempt a connection.
-* `path` - (Optional) The path on the backend Droplets to which the Load Balancer instance will send a request.
-* `check_interval_seconds` - (Optional) The number of seconds between between two consecutive health checks. If not specified, the default value is `10`.
-* `response_timeout_seconds` - (Optional) The number of seconds the Load Balancer instance will wait for a response until marking a health check as failed. If not specified, the default value is `5`.
-* `unhealthy_threshold` - (Optional) The number of times a health check must fail for a backend Droplet to be marked "unhealthy" and be removed from the pool. If not specified, the default value is `3`.
-* `healthy_threshold` - (Optional) The number of times a health check must pass for a backend Droplet to be marked "healthy" and be re-added to the pool. If not specified, the default value is `5`.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Load Balancer
-* `ip`- The ip of the Load Balancer
diff --git a/website/source/docs/providers/do/r/record.html.markdown b/website/source/docs/providers/do/r/record.html.markdown
deleted file mode 100644
index 5160b49fc..000000000
--- a/website/source/docs/providers/do/r/record.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_record"
-sidebar_current: "docs-do-resource-record"
-description: |-
- Provides a DigitalOcean DNS record resource.
----
-
-# digitalocean\_record
-
-Provides a DigitalOcean DNS record resource.
-
-## Example Usage
-
-```hcl
-# Create a new domain
-resource "digitalocean_domain" "default" {
- name = "www.example.com"
- ip_address = "${digitalocean_droplet.foo.ipv4_address}"
-}
-
-# Add a record to the domain
-resource "digitalocean_record" "foobar" {
- domain = "${digitalocean_domain.default.name}"
- type = "A"
- name = "foobar"
- value = "192.168.0.11"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The type of record
-* `domain` - (Required) The domain to add the record to
-* `value` - (Optional) The value of the record
-* `name` - (Optional) The name of the record
-* `weight` - (Optional) The weight of the record, for SRV records.
-* `port` - (Optional) The port of the record, for SRV records.
-* `priority` - (Optional) The priority of the record, for MX and SRV
- records.
-* `ttl` - (Optional) The time to live for the record, in seconds.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `fqdn` - The FQDN of the record
diff --git a/website/source/docs/providers/do/r/ssh_key.html.markdown b/website/source/docs/providers/do/r/ssh_key.html.markdown
deleted file mode 100644
index 4d27039fe..000000000
--- a/website/source/docs/providers/do/r/ssh_key.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_ssh_key"
-sidebar_current: "docs-do-resource-ssh-key"
-description: |-
- Provides a DigitalOcean SSH key resource.
----
-
-# digitalocean\_ssh_key
-
-Provides a DigitalOcean SSH key resource to allow you manage SSH
-keys for Droplet access. Keys created with this resource
-can be referenced in your droplet configuration via their ID or
-fingerprint.
-
-## Example Usage
-
-```hcl
-# Create a new SSH key
-resource "digitalocean_ssh_key" "default" {
- name = "Terraform Example"
- public_key = "${file("/Users/terraform/.ssh/id_rsa.pub")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the SSH key for identification
-* `public_key` - (Required) The public key. If this is a file, it
-can be read using the file interpolation function
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID of the key
-* `name` - The name of the SSH key
-* `public_key` - The text of the public key
-* `fingerprint` - The fingerprint of the SSH key
-
-## Import
-
-SSH Keys can be imported using the `ssh key id`, e.g.
-
-```
-terraform import digitalocean_ssh_key.mykey 263654
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/do/r/tag.html.markdown b/website/source/docs/providers/do/r/tag.html.markdown
deleted file mode 100644
index 4ec09795a..000000000
--- a/website/source/docs/providers/do/r/tag.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_tag"
-sidebar_current: "docs-do-resource-tag"
-description: |-
- Provides a DigitalOcean Tag resource.
----
-
-# digitalocean\_tag
-
-Provides a DigitalOcean Tag resource. A Tag is a label that can be applied to a
-droplet resource in order to better organize or facilitate the lookups and
-actions on it. Tags created with this resource can be referenced in your droplet
-configuration via their ID or name.
-
-## Example Usage
-
-```hcl
-# Create a new tag
-resource "digitalocean_tag" "foobar" {
- name = "foobar"
-}
-
-# Create a new droplet in nyc3 with the foobar tag
-resource "digitalocean_droplet" "web" {
- image = "ubuntu-16-04-x64"
- name = "web-1"
- region = "nyc3"
- size = "512mb"
- tags = ["${digitalocean_tag.foobar.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the tag
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The id of the tag
-* `name` - The name of the tag
-
-
-## Import
-
-Tags can be imported using the `name`, e.g.
-
-```
-terraform import digitalocean_tag.mytag tagname
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/do/r/volume.markdown b/website/source/docs/providers/do/r/volume.markdown
deleted file mode 100644
index 23ffedfb8..000000000
--- a/website/source/docs/providers/do/r/volume.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "digitalocean"
-page_title: "DigitalOcean: digitalocean_volume"
-sidebar_current: "docs-do-resource-volume"
-description: |-
- Provides a DigitalOcean volume resource.
----
-
-# digitalocean\_volume
-
-Provides a DigitalOcean Block Storage volume which can be attached to a Droplet in order to provide expanded storage.
-
-## Example Usage
-
-```hcl
-resource "digitalocean_volume" "foobar" {
- region = "nyc1"
- name = "baz"
- size = 100
- description = "an example volume"
-}
-
-resource "digitalocean_droplet" "foobar" {
- name = "baz"
- size = "1gb"
- image = "coreos-stable"
- region = "nyc1"
- volume_ids = ["${digitalocean_volume.foobar.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region that the block storage volume will be created in.
-* `name` - (Required) A name for the block storage volume. Must be lowercase and be composed only of numbers, letters and "-", up to a limit of 64 characters.
-* `size` - (Required) The size of the block storage volume in GiB.
-* `description` - (Optional) A free-form text field up to a limit of 1024 bytes to describe a block storage volume.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique identifier for the block storage volume.
-
-
-## Import
-
-Volumes can be imported using the `volume id`, e.g.
-
-```
-terraform import digitalocean_volume.volumea 506f78a4-e098-11e5-ad9f-000f53306ae1
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/docker/d/registry_image.html.markdown b/website/source/docs/providers/docker/d/registry_image.html.markdown
deleted file mode 100644
index f8d0f8dea..000000000
--- a/website/source/docs/providers/docker/d/registry_image.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "docker"
-page_title: "Docker: docker_registry_image"
-sidebar_current: "docs-docker-datasource-registry-image"
-description: |-
- Finds the latest available sha256 digest for a docker image/tag from a registry.
----
-
-# docker\_registry\_image
-
--> **Note:** The initial (current) version of this data source can reliably read only **public** images **from the official Docker Hub Registry**.
-
-Reads the image metadata from a Docker Registry. Used in conjunction with the
-[docker\_image](/docs/providers/docker/r/image.html) resource to keep an image up
-to date on the latest available version of the tag.
-
-## Example Usage
-
-```hcl
-data "docker_registry_image" "ubuntu" {
- name = "ubuntu:precise"
-}
-
-resource "docker_image" "ubuntu" {
- name = "${data.docker_registry_image.ubuntu.name}"
- pull_triggers = ["${data.docker_registry_image.ubuntu.sha256_digest}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the Docker image, including any tags. e.g. `alpine:latest`
-
-## Attributes Reference
-
-The following attributes are exported in addition to the above configuration:
-
-* `sha256_digest` (string) - The content digest of the image, as stored on the registry.
diff --git a/website/source/docs/providers/docker/index.html.markdown b/website/source/docs/providers/docker/index.html.markdown
deleted file mode 100644
index f55e0973a..000000000
--- a/website/source/docs/providers/docker/index.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "docker"
-page_title: "Provider: Docker"
-sidebar_current: "docs-docker-index"
-description: |-
- The Docker provider is used to interact with Docker containers and images.
----
-
-# Docker Provider
-
-The Docker provider is used to interact with Docker containers and images.
-It uses the Docker API to manage the lifecycle of Docker containers. Because
-the Docker provider uses the Docker API, it is immediately compatible not
-only with single server Docker but Swarm and any additional Docker-compatible
-API hosts.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Docker provider
-provider "docker" {
- host = "tcp://127.0.0.1:2376/"
-}
-
-# Create a container
-resource "docker_container" "foo" {
- image = "${docker_image.ubuntu.latest}"
- name = "foo"
-}
-
-resource "docker_image" "ubuntu" {
- name = "ubuntu:latest"
-}
-```
-
-## Registry Credentials
-
-The initial (current) version of the Docker provider **doesn't** support registry authentication.
-This limits any use cases to public images for now.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `host` - (Required) This is the address to the Docker host. If this is
- blank, the `DOCKER_HOST` environment variable will also be read.
-
-* `cert_path` - (Optional) Path to a directory with certificate information
- for connecting to the Docker host via TLS. If this is blank, the
- `DOCKER_CERT_PATH` will also be checked.
-
-* `ca_material`, `cert_material`, `key_material`, - (Optional) Content of `ca.pem`, `cert.pem`, and `key.pem` files
- for TLS authentication. Cannot be used together with `cert_path`.
-
-~> **NOTE on Certificates and `docker-machine`:** As per [Docker Remote API
-documentation](https://docs.docker.com/engine/reference/api/docker_remote_api/),
-in any docker-machine environment, the Docker daemon uses an encrypted TCP
-socket (TLS) and requires `cert_path` for a successful connection. As an alternative,
-if using `docker-machine`, run `eval $(docker-machine env )` prior
-to running Terraform, and the host and certificate path will be extracted from
-the environment.
diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown
deleted file mode 100644
index a267cb244..000000000
--- a/website/source/docs/providers/docker/r/container.html.markdown
+++ /dev/null
@@ -1,175 +0,0 @@
----
-layout: "docker"
-page_title: "Docker: docker_container"
-sidebar_current: "docs-docker-resource-container"
-description: |-
- Manages the lifecycle of a Docker container.
----
-
-# docker\_container
-
-Manages the lifecycle of a Docker container.
-
-## Example Usage
-
-```hcl
-# Start a container
-resource "docker_container" "ubuntu" {
- name = "foo"
- image = "${docker_image.ubuntu.latest}"
-}
-
-# Find the latest Ubuntu precise image.
-resource "docker_image" "ubuntu" {
- name = "ubuntu:precise"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the Docker container.
-* `image` - (Required, string) The ID of the image to back this container.
- The easiest way to get this value is to use the `docker_image` resource
- as is shown in the example above.
-
-* `command` - (Optional, list of strings) The command to use to start the
- container. For example, to run `/usr/bin/myprogram -f baz.conf` set the
- command to be `["/usr/bin/myprogram", "-f", "baz.conf"]`.
-* `entrypoint` - (Optional, list of strings) The command to use as the
- Entrypoint for the container. The Entrypoint allows you to configure a
- container to run as an executable. For example, to run `/usr/bin/myprogram`
- when starting a container, set the entrypoint to be
- `["/usr/bin/myprogram"]`.
-* `user` - (Optional, string) User used for run the first process. Format is
- `user` or `user:group` which user and group can be passed literraly or
- by name.
-* `dns` - (Optional, set of strings) Set of DNS servers.
-* `dns_opts` - (Optional, set of strings) Set of DNS options used by the DNS provider(s), see `resolv.conf` documentation for valid list of options.
-* `dns_search` - (Optional, set of strings) Set of DNS search domains that are used when bare unqualified hostnames are used inside of the container.
-* `env` - (Optional, set of strings) Environment variables to set.
-* `labels` - (Optional, map of strings) Key/value pairs to set as labels on the
- container.
-* `links` - (Optional, set of strings) Set of links for link based
- connectivity between containers that are running on the same host.
-* `hostname` - (Optional, string) Hostname of the container.
-* `domainname` - (Optional, string) Domain name of the container.
-* `restart` - (Optional, string) The restart policy for the container. Must be
- one of "no", "on-failure", "always", "unless-stopped".
-* `max_retry_count` - (Optional, int) The maximum amount of times to an attempt
- a restart when `restart` is set to "on-failure"
-* `must_run` - (Optional, bool) If true, then the Docker container will be
- kept running. If false, then as long as the container exists, Terraform
- assumes it is successful.
-* `capabilities` - (Optional, block) See [Capabilities](#capabilities) below for details.
-* `ports` - (Optional, block) See [Ports](#ports) below for details.
-* `host` - (Optional, block) See [Extra Hosts](#extra_hosts) below for
- details.
-* `privileged` - (Optional, bool) Run container in privileged mode.
-* `publish_all_ports` - (Optional, bool) Publish all ports of the container.
-* `volumes` - (Optional, block) See [Volumes](#volumes) below for details.
-* `memory` - (Optional, int) The memory limit for the container in MBs.
-* `memory_swap` - (Optional, int) The total memory limit (memory + swap) for the
- container in MBs. This setting may compute to `-1` after `terraform apply` if the target host doesn't support memory swap, when that is the case docker will use a soft limitation.
-* `cpu_shares` - (Optional, int) CPU shares (relative weight) for the container.
-* `log_driver` - (Optional, string) The logging driver to use for the container.
- Defaults to "json-file".
-* `log_opts` - (Optional, map of strings) Key/value pairs to use as options for
- the logging driver.
-* `network_alias` - (Optional, set of strings) Network aliases of the container for user-defined networks only.
-* `network_mode` - (Optional, string) Network mode of the container.
-* `networks` - (Optional, set of strings) Id of the networks in which the
- container is.
-* `destroy_grace_seconds` - (Optional, int) If defined will attempt to stop the container before destroying. Container will be destroyed after `n` seconds or on successful stop.
-* `upload` - (Optional, block) See [File Upload](#upload) below for details.
-
-
-### Capabilities
-
-`capabilities` is a block within the configuration that allows you to add or drop linux capabilities. For more information about what capabilities you can add and drop please visit the docker run documentation.
-
-* `add` - (Optional, set of strings) list of linux capabilities to add.
-* `drop` - (Optional, set of strings) list of linux capabilities to drop.
-
-Example:
-
-```hcl
-resource "docker_container" "ubuntu" {
- name = "foo"
- image = "${docker_image.ubuntu.latest}"
- capabilities {
- add = ["ALL"]
- drop = ["SYS_ADMIN"]
- }
-}
-```
-
-
-### Ports
-
-`ports` is a block within the configuration that can be repeated to specify
-the port mappings of the container. Each `ports` block supports
-the following:
-
-* `internal` - (Required, int) Port within the container.
-* `external` - (Required, int) Port exposed out of the container.
-* `ip` - (Optional, string) IP address/mask that can access this port.
-* `protocol` - (Optional, string) Protocol that can be used over this port,
- defaults to TCP.
-
-
-### Extra Hosts
-
-`host` is a block within the configuration that can be repeated to specify
-the extra host mappings for the container. Each `host` block supports
-the following:
-
-* `host` - (Required, string) Hostname to add.
-* `ip` - (Required, string) IP address this hostname should resolve to.
-
-This is equivalent to using the `--add-host` option when using the `run`
-command of the Docker CLI.
-
-
-### Volumes
-
-`volumes` is a block within the configuration that can be repeated to specify
-the volumes attached to a container. Each `volumes` block supports
-the following:
-
-* `from_container` - (Optional, string) The container where the volume is
- coming from.
-* `host_path` - (Optional, string) The path on the host where the volume
- is coming from.
-* `volume_name` - (Optional, string) The name of the docker volume which
- should be mounted.
-* `container_path` - (Optional, string) The path in the container where the
- volume will be mounted.
-* `read_only` - (Optional, bool) If true, this volume will be readonly.
- Defaults to false.
-
-One of `from_container`, `host_path` or `volume_name` must be set.
-
-
-### File Upload
-
-`upload` is a block within the configuration that can be repeated to specify
-files to upload to the container before starting it.
-Each `upload` supports the following
-
-* `content` - (Required, string) A content of a file to upload.
-* `file` - (Required, string) path to a file in the container.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `ip_address` - The IP address of the container as read from its
- NetworkSettings.
- * `ip_prefix_length` - The IP prefix length of the container as read from its
- NetworkSettings.
- * `gateway` - The network gateway of the container as read from its
- NetworkSettings.
- * `bridge` - The network bridge of the container as read from its
- NetworkSettings.
diff --git a/website/source/docs/providers/docker/r/image.html.markdown b/website/source/docs/providers/docker/r/image.html.markdown
deleted file mode 100644
index c08f016ea..000000000
--- a/website/source/docs/providers/docker/r/image.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "docker"
-page_title: "Docker: docker_image"
-sidebar_current: "docs-docker-resource-image"
-description: |-
- Pulls a Docker image to a given Docker host.
----
-
-# docker\_image
-
--> **Note:** The initial (current) version of this resource can only pull **public** images **from the official Docker Hub Registry**.
-
-Pulls a Docker image to a given Docker host from a Docker Registry.
-
-This resource will *not* pull new layers of the image automatically unless used in
-conjunction with [`docker_registry_image`](/docs/providers/docker/d/registry_image.html)
-data source to update the `pull_triggers` field.
-
-## Example Usage
-
-```hcl
-# Find the latest Ubuntu precise image.
-resource "docker_image" "ubuntu" {
- name = "ubuntu:precise"
-}
-
-# Access it somewhere else with ${docker_image.ubuntu.latest}
-
-```
-
-### Dynamic image
-
-```hcl
-data "docker_registry_image" "ubuntu" {
- name = "ubuntu:precise"
-}
-
-resource "docker_image" "ubuntu" {
- name = "${data.docker_registry_image.ubuntu.name}"
- pull_triggers = ["${data.docker_registry_image.ubuntu.sha256_digest}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the Docker image, including any tags.
-* `keep_locally` - (Optional, boolean) If true, then the Docker image won't be
- deleted on destroy operation. If this is false, it will delete the image from
- the docker local storage on destroy operation.
-* `pull_triggers` - (Optional, list of strings) List of values which cause an
- image pull when changed. This is used to store the image digest from the
- registry when using the `docker_registry_image` [data source](/docs/providers/docker/d/registry_image.html)
- to trigger an image update.
-* `pull_trigger` - **Deprecated**, use `pull_triggers` instead.
-
-
-## Attributes Reference
-
-The following attributes are exported in addition to the above configuration:
-
-* `latest` (string) - The ID of the image.
diff --git a/website/source/docs/providers/docker/r/network.html.markdown b/website/source/docs/providers/docker/r/network.html.markdown
deleted file mode 100644
index d0636d499..000000000
--- a/website/source/docs/providers/docker/r/network.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "docker"
-page_title: "Docker: docker_network"
-sidebar_current: "docs-docker-resource-network"
-description: |-
- Manages a Docker Network.
----
-
-# docker\_network
-
-Manages a Docker Network. This can be used alongside
-[docker\_container](/docs/providers/docker/r/container.html)
-to create virtual networks within the docker environment.
-
-## Example Usage
-
-```hcl
-# Find the latest Ubuntu precise image.
-resource "docker_network" "private_network" {
- name = "my_network"
-}
-
-# Access it somewhere else with ${docker_network.private_network.name}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required, string) The name of the Docker network.
-* `check_duplicate` - (Optional, boolean) Requests daemon to check for networks
- with same name.
-* `driver` - (Optional, string) Name of the network driver to use. Defaults to
- `bridge` driver.
-* `options` - (Optional, map of strings) Network specific options to be used by
- the drivers.
-* `internal` - (Optional, boolean) Restrict external access to the network.
- Defaults to `false`.
-* `ipam_driver` - (Optional, string) Driver used by the custom IP scheme of the
- network.
-* `ipam_config` - (Optional, block) See [IPAM config](#ipam_config) below for
- details.
-
-
-### IPAM config
-Configuration of the custom IP scheme of the network.
-
-The `ipam_config` block supports:
-
-* `subnet` - (Optional, string)
-* `ip_range` - (Optional, string)
-* `gateway` - (Optional, string)
-* `aux_address` - (Optional, map of string)
-
-## Attributes Reference
-
-The following attributes are exported in addition to the above configuration:
-
-* `id` (string)
-* `scope` (string)
diff --git a/website/source/docs/providers/docker/r/volume.html.markdown b/website/source/docs/providers/docker/r/volume.html.markdown
deleted file mode 100644
index f5d9dee94..000000000
--- a/website/source/docs/providers/docker/r/volume.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "docker"
-page_title: "Docker: docker_volume"
-sidebar_current: "docs-docker-resource-volume"
-description: |-
- Creates and destroys docker volumes.
----
-
-# docker\_volume
-
-Creates and destroys a volume in Docker. This can be used alongside
-[docker\_container](/docs/providers/docker/r/container.html)
-to prepare volumes that can be shared across containers.
-
-## Example Usage
-
-```hcl
-# Creates a docker volume "shared_volume".
-resource "docker_volume" "shared_volume" {
- name = "shared_volume"
-}
-
-# Reference the volume with ${docker_volume.shared_volume.name}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional, string) The name of the Docker volume (generated if not
- provided).
-* `driver` - (Optional, string) Driver type for the volume (defaults to local).
-* `driver_opts` - (Optional, map of strings) Options specific to the driver.
-
-## Attributes Reference
-
-The following attributes are exported in addition to the above configuration:
-
-* `mountpoint` (string) - The mountpoint of the volume.
diff --git a/website/source/docs/providers/dyn/index.html.markdown b/website/source/docs/providers/dyn/index.html.markdown
deleted file mode 100644
index 8811bb7bc..000000000
--- a/website/source/docs/providers/dyn/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "dyn"
-page_title: "Provider: Dyn"
-sidebar_current: "docs-dyn-index"
-description: |-
- The Dyn provider is used to interact with the resources supported by Dyn. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Dyn Provider
-
-The Dyn provider is used to interact with the
-resources supported by Dyn. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Dyn provider
-provider "dyn" {
- customer_name = "${var.dyn_customer_name}"
- username = "${var.dyn_username}"
- password = "${var.dyn_password}"
-}
-
-# Create a record
-resource "dyn_record" "www" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `customer_name` - (Required) The Dyn customer name. It must be provided, but it can also be sourced from the `DYN_CUSTOMER_NAME` environment variable.
-* `username` - (Required) The Dyn username. It must be provided, but it can also be sourced from the `DYN_USERNAME` environment variable.
-* `password` - (Required) The Dyn password. It must be provided, but it can also be sourced from the `DYN_PASSWORD` environment variable.
diff --git a/website/source/docs/providers/dyn/r/record.html.markdown b/website/source/docs/providers/dyn/r/record.html.markdown
deleted file mode 100644
index 92cd3b510..000000000
--- a/website/source/docs/providers/dyn/r/record.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "dyn"
-page_title: "Dyn: dyn_record"
-sidebar_current: "docs-dyn-resource-record"
-description: |-
- Provides a Dyn DNS record resource.
----
-
-# dyn\_record
-
-Provides a Dyn DNS record resource.
-
-## Example Usage
-
-```hcl
-# Add a record to the domain
-resource "dyn_record" "foobar" {
- zone = "${var.dyn_zone}"
- name = "terraform"
- value = "192.168.0.11"
- type = "A"
- ttl = 3600
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the record.
-* `type` - (Required) The type of the record.
-* `value` - (Required) The value of the record.
-* `zone` - (Required) The DNS zone to add the record to.
-* `ttl` - (Optional) The TTL of the record. Default uses the zone default.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID.
-* `fqdn` - The FQDN of the record, built from the `name` and the `zone`.
diff --git a/website/source/docs/providers/external/data_source.html.md b/website/source/docs/providers/external/data_source.html.md
deleted file mode 100644
index 53b2271df..000000000
--- a/website/source/docs/providers/external/data_source.html.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-layout: "external"
-page_title: "External Data Source"
-sidebar_current: "docs-external-data-source"
-description: |-
- Executes an external program that implements a data source.
----
-
-# External Data Source
-
-The `external` data source allows an external program implementing a specific
-protocol (defined below) to act as a data source, exposing arbitrary
-data for use elsewhere in the Terraform configuration.
-
-~> **Warning** This mechanism is provided as an "escape hatch" for exceptional
-situations where a first-class Terraform provider is not more appropriate.
-Its capabilities are limited in comparison to a true data source, and
-implementing a data source via an external program is likely to hurt the
-portability of your Terraform configuration by creating dependencies on
-external programs and libraries that may not be available (or may need to
-be used differently) on different operating systems.
-
-~> **Warning** Terraform Enterprise does not guarantee availability of any
-particular language runtimes or external programs beyond standard shell
-utilities, so it is not recommended to use this data source within
-configurations that are applied within Terraform Enterprise.
-
-## Example Usage
-
-```hcl
-data "external" "example" {
- program = ["python", "${path.module}/example-data-source.py"]
-
- query = {
- # arbitrary map from strings to strings, passed
- # to the external program as the data query.
- id = "abc123"
- }
-}
-```
-
-## External Program Protocol
-
-The external program described by the `program` attribute must implement a
-specific protocol for interacting with Terraform, as follows.
-
-The program must read all of the data passed to it on `stdin`, and parse
-it as a JSON object. The JSON object contains the contents of the `query`
-argument and its values will always be strings.
-
-The program must then produce a valid JSON object on `stdout`, which will
-be used to populate the `result` attribute exported to the rest of the
-Terraform configuration. This JSON object must again have all of its
-values as strings. On successful completion it must exit with status zero.
-
-If the program encounters an error and is unable to produce a result, it
-must print a human-readable error message (ideally a single line) to `stderr`
-and exit with a non-zero status. Any data on `stdout` is ignored if the
-program returns a non-zero status.
-
-All environment variables visible to the Terraform process are passed through
-to the child program.
-
-Terraform expects a data source to have *no observable side-effects*, and will
-re-run the program each time the state is refreshed.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `program` - (Required) A list of strings, whose first element is the program
- to run and whose subsequent elements are optional command line arguments
- to the program. Terraform does not execute the program through a shell, so
- it is not necessary to escape shell metacharacters nor add quotes around
- arguments containing spaces.
-
-* `query` - (Optional) A map of string values to pass to the external program
- as the query arguments. If not supplied, the program will receive an empty
- object as its input.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `result` - A map of string values returned from the external program.
-
-## Processing JSON in shell scripts
-
-Since the external data source protocol uses JSON, it is recommended to use
-the utility [`jq`](https://stedolan.github.io/jq/) to translate to and from
-JSON in a robust way when implementing a data source in a shell scripting
-language.
-
-The following example shows some input/output boilerplate code for a
-data source implemented in bash:
-
-```bash
-#!/bin/bash
-
-# Exit if any of the intermediate steps fail
-set -e
-
-# Extract "foo" and "baz" arguments from the input into
-# FOO and BAZ shell variables.
-# jq will ensure that the values are properly quoted
-# and escaped for consumption by the shell.
-eval "$(jq -r '@sh "FOO=\(.foo) BAZ=\(.baz)"')"
-
-# Placeholder for whatever data-fetching logic your script implements
-FOOBAZ="$FOO BAZ"
-
-# Safely produce a JSON object containing the result value.
-# jq will ensure that the value is properly quoted
-# and escaped to produce a valid JSON string.
-jq -n --arg foobaz "$FOOBAZ" '{"foobaz":$foobaz}'
-```
diff --git a/website/source/docs/providers/external/index.html.markdown b/website/source/docs/providers/external/index.html.markdown
deleted file mode 100644
index cd7ee01d9..000000000
--- a/website/source/docs/providers/external/index.html.markdown
+++ /dev/null
@@ -1,27 +0,0 @@
----
-layout: "external"
-page_title: "Provider: External"
-sidebar_current: "docs-external-index"
-description: |-
- The external provider allows external scripts to be integrated with Terraform.
----
-
-# External Provider
-
-`external` is a special provider that exists to provide an interface
-between Terraform and external programs.
-
-Using this provider it is possible to write separate programs that can
-participate in the Terraform workflow by implementing a specific protocol.
-
-This provider is intended to be used for simple situations where you wish
-to integrate Terraform with a system for which a first-class provider
-doesn't exist. It is not as powerful as a first-class Terraform provider,
-so users of this interface should carefully consider the implications
-described on each of the child documentation pages (available from the
-navigation bar) for each type of object this provider supports.
-
-~> **Warning** Terraform Enterprise does not guarantee availability of any
-particular language runtimes or external programs beyond standard shell
-utilities, so it is not recommended to use this provider within configurations
-that are applied within Terraform Enterprise.
diff --git a/website/source/docs/providers/fastly/d/ip_ranges.html.markdown b/website/source/docs/providers/fastly/d/ip_ranges.html.markdown
deleted file mode 100644
index adc50a9f9..000000000
--- a/website/source/docs/providers/fastly/d/ip_ranges.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "fastly"
-page_title: "Fastly: fastly_ip_ranges"
-sidebar_current: "docs-fastly-datasource-ip_ranges"
-description: |-
- Get information on Fastly IP ranges.
----
-
-# fastly_ip_ranges
-
-Use this data source to get the [IP ranges][1] of Fastly edge nodes.
-
-## Example Usage
-
-```hcl
-data "fastly_ip_ranges" "fastly" {}
-
-resource "aws_security_group" "from_fastly" {
- name = "from_fastly"
-
- ingress {
- from_port = "443"
- to_port = "443"
- protocol = "tcp"
- cidr_blocks = ["${data.fastly_ip_ranges.fastly.cidr_blocks}"]
- }
-}
-```
-
-## Attributes Reference
-
-* `cidr_blocks` - The lexically ordered list of CIDR blocks.
-
-[1]: https://docs.fastly.com/guides/securing-communications/accessing-fastlys-ip-ranges
diff --git a/website/source/docs/providers/fastly/index.html.markdown b/website/source/docs/providers/fastly/index.html.markdown
deleted file mode 100644
index e530dd3a5..000000000
--- a/website/source/docs/providers/fastly/index.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "fastly"
-page_title: "Provider: Fastly"
-sidebar_current: "docs-fastly-index"
-description: |-
- Fastly
----
-
-# Fastly Provider
-
-The Fastly provider is used to interact with the content delivery network (CDN)
-provided by Fastly.
-
-In order to use this Provider, you must have an active account with Fastly.
-Pricing and signup information can be found at https://www.fastly.com/signup
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Fastly Provider
-provider "fastly" {
- api_key = "test"
-}
-
-# Create a Service
-resource "fastly_service_v1" "myservice" {
- name = "myawesometestservice"
-
- # ...
-}
-```
-
-## Authentication
-
-The Fastly provider offers an API key based method of providing credentials for
-authentication. The following methods are supported, in this order, and
-explained below:
-
-- Static API key
-- Environment variables
-
-
-### Static API Key ###
-
-Static credentials can be provided by adding a `api_key` in-line in the
-Fastly provider block:
-
-Usage:
-
-```hcl
-provider "fastly" {
- api_key = "test"
-}
-
-resource "fastly_service_v1" "myservice" {
- # ...
-}
-```
-
-The API key for an account can be found on the Account page: https://app.fastly.com/#account
-
-###Environment variables
-
-You can provide your API key via `FASTLY_API_KEY` environment variable,
-representing your Fastly API key. When using this method, you may omit the
-Fastly `provider` block entirely:
-
-```hcl
-resource "fastly_service_v1" "myservice" {
- # ...
-}
-```
-
-Usage:
-
-```
-$ export FASTLY_API_KEY="afastlyapikey"
-$ terraform plan
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `api_key` - (Optional) This is the API key. It must be provided, but
- it can also be sourced from the `FASTLY_API_KEY` environment variable
diff --git a/website/source/docs/providers/fastly/r/service_v1.html.markdown b/website/source/docs/providers/fastly/r/service_v1.html.markdown
deleted file mode 100644
index 2f0a7cb2e..000000000
--- a/website/source/docs/providers/fastly/r/service_v1.html.markdown
+++ /dev/null
@@ -1,390 +0,0 @@
----
-layout: "fastly"
-page_title: "Fastly: service_v1"
-sidebar_current: "docs-fastly-resource-service-v1"
-description: |-
- Provides a Fastly Service
----
-
-# fastly_service_v1
-
-Provides a Fastly Service, representing the configuration for a website, app,
-API, or anything else to be served through Fastly. A Service encompasses Domains
-and Backends.
-
-The Service resource requires a domain name that is correctly set up to direct
-traffic to the Fastly service. See Fastly's guide on [Adding CNAME Records][fastly-cname]
-on their documentation site for guidance.
-
-## Example Usage
-
-Basic usage:
-
-```hcl
-resource "fastly_service_v1" "demo" {
- name = "demofastly"
-
- domain {
- name = "demo.notexample.com"
- comment = "demo"
- }
-
- backend {
- address = "127.0.0.1"
- name = "localhost"
- port = 80
- }
-
- force_destroy = true
-}
-```
-
-Basic usage with an Amazon S3 Website and that removes the `x-amz-request-id` header:
-
-```hcl
-resource "fastly_service_v1" "demo" {
- name = "demofastly"
-
- domain {
- name = "demo.notexample.com"
- comment = "demo"
- }
-
- backend {
- address = "demo.notexample.com.s3-website-us-west-2.amazonaws.com"
- name = "AWS S3 hosting"
- port = 80
- }
-
- header {
- destination = "http.x-amz-request-id"
- type = "cache"
- action = "delete"
- name = "remove x-amz-request-id"
- }
-
- gzip {
- name = "file extensions and content types"
- extensions = ["css", "js"]
- content_types = ["text/html", "text/css"]
- }
-
- default_host = "${aws_s3_bucket.website.name}.s3-website-us-west-2.amazonaws.com"
-
- force_destroy = true
-}
-
-resource "aws_s3_bucket" "website" {
- bucket = "demo.notexample.com"
- acl = "public-read"
-
- website {
- index_document = "index.html"
- error_document = "error.html"
- }
-}
-```
-
-Basic usage with [custom
-VCL](https://docs.fastly.com/guides/vcl/uploading-custom-vcl) (must be
-enabled on your Fastly account):
-
-```hcl
-resource "fastly_service_v1" "demo" {
- name = "demofastly"
-
- domain {
- name = "demo.notexample.com"
- comment = "demo"
- }
-
- backend {
- address = "127.0.0.1"
- name = "localhost"
- port = 80
- }
-
- force_destroy = true
-
- vcl {
- name = "my_custom_main_vcl"
- content = "${file("${path.module}/my_custom_main.vcl")}"
- main = true
- }
-
- vcl {
- name = "my_custom_library_vcl"
- content = "${file("${path.module}/my_custom_library.vcl")}"
- }
-}
-```
-
--> **Note:** For an AWS S3 Bucket, the Backend address is
-`<bucket-name>.s3-website-<region>.amazonaws.com`. The `default_host` attribute
-should be set to `<bucket-name>.s3-website-<region>.amazonaws.com`. See the
-Fastly documentation on [Amazon S3][fastly-s3].
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique name for the Service to create.
-* `domain` - (Required) A set of Domain names to serve as entry points for your
-Service. Defined below.
-* `backend` - (Optional) A set of Backends to service requests from your Domains.
-Defined below. Backends must be defined in this argument, or defined in the
-`vcl` argument below
-* `condition` - (Optional) A set of conditions to add logic to any basic
-configuration object in this service. Defined below.
-* `cache_setting` - (Optional) A set of Cache Settings, allowing you to override
-when an item is not to be cached based on an above `condition`. Defined below
-* `gzip` - (Required) A set of gzip rules to control automatic gzipping of
-content. Defined below.
-* `header` - (Optional) A set of Headers to manipulate for each request. Defined
-below.
-* `healthcheck` - (Optional) Automated healthchecks on the cache that can change how fastly interacts with the cache based on its health.
-* `default_host` - (Optional) The default hostname.
-* `default_ttl` - (Optional) The default Time-to-live (TTL) for
-requests.
-* `force_destroy` - (Optional) Services that are active cannot be destroyed. In
-order to destroy the Service, set `force_destroy` to `true`. Default `false`.
-* `request_setting` - (Optional) A set of Request modifiers. Defined below
-* `s3logging` - (Optional) A set of S3 Buckets to send streaming logs to.
-Defined below.
-* `papertrail` - (Optional) A Papertrail endpoint to send streaming logs to.
-Defined below.
-* `sumologic` - (Optional) A Sumologic endpoint to send streaming logs to.
-Defined below.
-* `gcslogging` - (Optional) A GCS endpoint to send streaming logs to.
-Defined below.
-* `response_object` - (Optional) Allows you to create synthetic responses that exist entirely on the varnish machine. Useful for creating error or maintenance pages that exists outside the scope of your datacenter. Best when used with Condition objects.
-* `vcl` - (Optional) A set of custom VCL configuration blocks. The
-ability to upload custom VCL code is not enabled by default for new Fastly
-accounts (see the [Fastly documentation](https://docs.fastly.com/guides/vcl/uploading-custom-vcl) for details).
-
-The `domain` block supports:
-
-* `name` - (Required) The domain to which this Service will respond.
-* `comment` - (Optional) An optional comment about the Domain.
-
-The `backend` block supports:
-
-* `name` - (Required, string) Name for this Backend. Must be unique to this Service.
-* `address` - (Required, string) An IPv4, hostname, or IPv6 address for the Backend.
-* `auto_loadbalance` - (Optional, boolean) Denotes if this Backend should be
-included in the pool of backends that requests are load balanced against.
-Default `true`.
-* `between_bytes_timeout` - (Optional) How long to wait between bytes in milliseconds. Default `10000`.
-* `connect_timeout` - (Optional) How long to wait for a timeout in milliseconds.
-Default `1000`
-* `error_threshold` - (Optional) Number of errors to allow before the Backend is marked as down. Default `0`.
-* `first_byte_timeout` - (Optional) How long to wait for the first bytes in milliseconds. Default `15000`.
-* `max_conn` - (Optional) Maximum number of connections for this Backend.
-Default `200`.
-* `port` - (Optional) The port number on which the Backend responds. Default `80`.
-* `request_condition` - (Optional, string) Name of already defined `condition`, which if met, will select this backend during a request.
-* `ssl_check_cert` - (Optional) Be strict about checking SSL certs. Default `true`.
-* `ssl_hostname` - (Optional, deprecated by Fastly) Used for both SNI during the TLS handshake and to validate the cert.
-* `ssl_cert_hostname` - (Optional) Overrides ssl_hostname, but only for cert verification. Does not affect SNI at all.
-* `ssl_sni_hostname` - (Optional) Overrides ssl_hostname, but only for SNI in the handshake. Does not affect cert validation at all.
-* `shield` - (Optional) The POP of the shield designated to reduce inbound load.
-* `weight` - (Optional) The [portion of traffic](https://docs.fastly.com/guides/performance-tuning/load-balancing-configuration.html#how-weight-affects-load-balancing) to send to this Backend. Each Backend receives `weight / total` of the traffic. Default `100`.
-
-The `condition` block allows you to add logic to any basic configuration
-object in a service. See Fastly's documentation
-["About Conditions"](https://docs.fastly.com/guides/conditions/about-conditions)
-for more detailed information on using Conditions. The Condition `name` can be
-used in the `request_condition`, `response_condition`, or
-`cache_condition` attributes of other block settings.
-
-* `name` - (Required) The unique name for the condition.
-* `statement` - (Required) The statement used to determine if the condition is met.
-* `priority` - (Required) A number used to determine the order in which multiple
-conditions execute. Lower numbers execute first.
-* `type` - (Required) Type of condition, either `REQUEST` (req), `RESPONSE`
-(req, resp), or `CACHE` (req, beresp).
-
-The `cache_setting` block supports:
-
-* `name` - (Required) Unique name for this Cache Setting.
-* `action` - (Required) One of `cache`, `pass`, or `restart`, as defined
-on Fastly's documentation under ["Caching action descriptions"](https://docs.fastly.com/guides/performance-tuning/controlling-caching#caching-action-descriptions).
-* `cache_condition` - (Optional) Name of already defined `condition` used to test whether this settings object should be used. This `condition` must be of type `CACHE`.
-* `stale_ttl` - (Optional) Max "Time To Live" for stale (unreachable) objects.
-Default `300`.
-* `ttl` - (Optional) The Time-To-Live (TTL) for the object.
-
-The `gzip` block supports:
-
-* `name` - (Required) A unique name.
-* `content_types` - (Optional) The content-type for each type of content you wish to
-have dynamically gzip'ed. Example: `["text/html", "text/css"]`.
-* `extensions` - (Optional) File extensions for each file type to dynamically
-gzip. Example: `["css", "js"]`.
-* `cache_condition` - (Optional) Name of already defined `condition` controlling when this gzip configuration applies. This `condition` must be of type `CACHE`. For detailed information about Conditionals,
-see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-
-The `Header` block supports adding, removing, or modifying Request and Response
-headers. See Fastly's documentation on
-[Adding or modifying headers on HTTP requests and responses](https://docs.fastly.com/guides/basic-configuration/adding-or-modifying-headers-on-http-requests-and-responses#field-description-table) for more detailed information on any of the properties below.
-
-* `name` - (Required) Unique name for this header attribute.
-* `action` - (Required) The Header manipulation action to take; must be one of
-`set`, `append`, `delete`, `regex`, or `regex_repeat`.
-* `type` - (Required) The Request type on which to apply the selected Action; must be one of `request`, `fetch`, `cache` or `response`.
-* `destination` - (Required) The name of the header that is going to be affected by the Action.
-* `ignore_if_set` - (Optional) Do not add the header if it is already present. (Only applies to the `set` action.). Default `false`.
-* `source` - (Optional) Variable to be used as a source for the header
-content. (Does not apply to the `delete` action.)
-* `regex` - (Optional) Regular expression to use (Only applies to the `regex` and `regex_repeat` actions.)
-* `substitution` - (Optional) Value to substitute in place of regular expression. (Only applies to the `regex` and `regex_repeat` actions.)
-* `priority` - (Optional) Lower priorities execute first. Default: `100`.
-* `request_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `REQUEST`.
-* `cache_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `CACHE`.
-* `response_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `RESPONSE`. For detailed information about Conditionals,
-see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-The `healthcheck` block supports:
-
-* `name` - (Required) A unique name to identify this Healthcheck.
-* `host` - (Required) Address of the host to check.
-* `path` - (Required) The path to check.
-* `check_interval` - (Optional) How often to run the Healthcheck in milliseconds. Default `5000`.
-* `expected_response` - (Optional) The status code expected from the host. Default `200`.
-* `http_version` - (Optional) Whether to use version 1.0 or 1.1 HTTP. Default `1.1`.
-* `initial` - (Optional) When loading a config, the initial number of probes to be seen as OK. Default `2`.
-* `method` - (Optional) Which HTTP method to use. Default `HEAD`.
-* `threshold` - (Optional) How many Healthchecks must succeed to be considered healthy. Default `3`.
-* `timeout` - (Optional) Timeout in milliseconds. Default `500`.
-* `window` - (Optional) The number of most recent Healthcheck queries to keep for this Healthcheck. Default `5`.
-
-The `request_setting` block allow you to customize Fastly's request handling, by
-defining behavior that should change based on a predefined `condition`:
-
-* `name` - (Required) The domain for this request setting.
-* `request_condition` - (Optional) Name of already defined `condition` to
-determine if this request setting should be applied.
-* `max_stale_age` - (Optional) How old an object is allowed to be to serve
-`stale-if-error` or `stale-while-revalidate`, in seconds. Default `60`.
-* `force_miss` - (Optional) Force a cache miss for the request. If specified,
-can be `true` or `false`.
-* `force_ssl` - (Optional) Forces the request to use SSL (Redirects a non-SSL request to SSL).
-* `action` - (Optional) Allows you to terminate request handling and immediately
-perform an action. When set it can be `lookup` or `pass` (Ignore the cache completely).
-* `bypass_busy_wait` - (Optional) Disable collapsed forwarding, so you don't wait
-for other objects to origin.
-* `hash_keys` - (Optional) Comma separated list of varnish request object fields
-that should be in the hash key.
-* `xff` - (Optional) X-Forwarded-For, should be `clear`, `leave`, `append`,
-`append_all`, or `overwrite`. Default `append`.
-* `timer_support` - (Optional) Injects the X-Timer info into the request for
-viewing origin fetch durations.
-* `geo_headers` - (Optional) Injects Fastly-Geo-Country, Fastly-Geo-City, and
-Fastly-Geo-Region into the request headers.
-* `default_host` - (Optional) Sets the host header.
-
-The `s3logging` block supports:
-
-* `name` - (Required) A unique name to identify this S3 Logging Bucket.
-* `bucket_name` - (Optional) The name of the S3 bucket in which to store the logs.
-* `s3_access_key` - (Required) AWS Access Key of an account with the required
-permissions to post logs. It is **strongly** recommended you create a separate
-IAM user with permissions to only operate on this Bucket. This key will
-not be encrypted. You can provide this key via an environment variable, `FASTLY_S3_ACCESS_KEY`.
-* `s3_secret_key` - (Required) AWS Secret Key of an account with the required
-permissions to post logs. It is **strongly** recommended you create a separate
-IAM user with permissions to only operate on this Bucket. This secret will
-not be encrypted. You can provide this secret via an environment variable, `FASTLY_S3_SECRET_KEY`.
-* `path` - (Optional) Path to store the files. Must end with a trailing slash.
-If this field is left empty, the files will be saved in the bucket's root path.
-* `domain` - (Optional) If you created the S3 bucket outside of `us-east-1`,
-then specify the corresponding bucket endpoint. Example: `s3-us-west-2.amazonaws.com`.
-* `period` - (Optional) How frequently the logs should be transferred, in
-seconds. Default `3600`.
-* `gzip_level` - (Optional) Level of GZIP compression, from `0-9`. `0` is no
-compression. `1` is fastest and least compressed, `9` is slowest and most
-compressed. Default `0`.
-* `format` - (Optional) Apache-style string or VCL variables to use for log formatting. Defaults to Apache Common Log format (`%h %l %u %t %r %>s`)
-* `timestamp_format` - (Optional) `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`).
-* `response_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `RESPONSE`. For detailed information about Conditionals,
-see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-The `papertrail` block supports:
-
-* `name` - (Required) A unique name to identify this Papertrail endpoint.
-* `address` - (Required) The address of the Papertrail endpoint.
-* `port` - (Required) The port associated with the address where the Papertrail endpoint can be accessed.
-* `format` - (Optional) Apache-style string or VCL variables to use for log formatting. Defaults to Apache Common Log format (`%h %l %u %t %r %>s`)
-* `response_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `RESPONSE`. For detailed information about Conditionals,
-see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-The `sumologic` block supports:
-
-* `name` - (Required) A unique name to identify this Sumologic endpoint.
-* `url` - (Required) The URL to Sumologic collector endpoint
-* `format` - (Optional) Apache-style string or VCL variables to use for log formatting. Defaults to Apache Common Log format (`%h %l %u %t %r %>s`)
-* `format_version` - (Optional) The version of the custom logging format used for the configured endpoint. Can be either 1 (the default, version 1 log format) or 2 (the version 2 log format).
-* `response_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `RESPONSE`. For detailed information about Conditionals, see [Fastly's Documentation on Conditionals][fastly-conditionals].
-* `message_type` - (Optional) How the message should be formatted. One of: classic, loggly, logplex, blank. See [Fastly's Documentation on Sumologic][fastly-sumologic]
-
-The `gcslogging` block supports:
-
-* `name` - (Required) A unique name to identify this GCS endpoint.
-* `email` - (Required) The email address associated with the target GCS bucket on your account.
-* `bucket_name` - (Required) The name of the bucket in which to store the logs.
-* `secret_key` - (Required) The secret key associated with the target gcs bucket on your account.
-* `path` - (Optional) Path to store the files. Must end with a trailing slash.
-If this field is left empty, the files will be saved in the bucket's root path.
-* `period` - (Optional) How frequently the logs should be transferred, in
-seconds. Default `3600`.
-* `gzip_level` - (Optional) Level of GZIP compression, from `0-9`. `0` is no
-compression. `1` is fastest and least compressed, `9` is slowest and most
-compressed. Default `0`.
-* `format` - (Optional) Apache-style string or VCL variables to use for log formatting. Defaults to Apache Common Log format (`%h %l %u %t %r %>s`)
-* `response_condition` - (Optional) Name of already defined `condition` to apply. This `condition` must be of type `RESPONSE`. For detailed information about Conditionals, see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-The `response_object` block supports:
-
-* `name` - (Required) A unique name to identify this Response Object.
-* `status` - (Optional) The HTTP Status Code. Default `200`.
-* `response` - (Optional) The HTTP Response. Default `Ok`.
-* `content` - (Optional) The content to deliver for the response object.
-* `content_type` - (Optional) The MIME type of the content.
-* `request_condition` - (Optional) Name of already defined `condition` to be checked during the request phase. If the condition passes then this object will be delivered. This `condition` must be of type `REQUEST`.
-* `cache_condition` - (Optional) Name of already defined `condition` to check after we have retrieved an object. If the condition passes then deliver this Request Object instead. This `condition` must be of type `CACHE`. For detailed information about Conditionals,
-see [Fastly's Documentation on Conditionals][fastly-conditionals].
-
-
-The `vcl` block supports:
-
-* `name` - (Required) A unique name for this configuration block.
-* `content` - (Required) The custom VCL code to upload.
-* `main` - (Optional) If `true`, use this block as the main configuration. If
-`false`, use this block as an includable library. Only a single VCL block can be
-marked as the main block. Default is `false`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the Service.
-* `name` – Name of this service.
-* `active_version` - The currently active version of your Fastly
-Service.
-* `domain` – Set of Domains. See above for details.
-* `backend` – Set of Backends. See above for details.
-* `header` – Set of Headers. See above for details.
-* `s3logging` – Set of S3 Logging configurations. See above for details.
-* `papertrail` – Set of Papertrail configurations. See above for details.
-* `response_object` - Set of Response Object configurations. See above for details.
-* `vcl` – Set of custom VCL configurations. See above for details.
-* `default_host` – Default host specified.
-* `default_ttl` - Default TTL.
-* `force_destroy` - Force the destruction of the Service on delete.
-
-[fastly-s3]: https://docs.fastly.com/guides/integrations/amazon-s3
-[fastly-cname]: https://docs.fastly.com/guides/basic-setup/adding-cname-records
-[fastly-conditionals]: https://docs.fastly.com/guides/conditions/using-conditions
-[fastly-sumologic]: https://docs.fastly.com/api/logging#logging_sumologic
-[fastly-gcs]: https://docs.fastly.com/api/logging#logging_gcs
diff --git a/website/source/docs/providers/github/d/team.html.markdown b/website/source/docs/providers/github/d/team.html.markdown
deleted file mode 100644
index a8026adf9..000000000
--- a/website/source/docs/providers/github/d/team.html.markdown
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: "github"
-page_title: "Github: github_team"
-sidebar_current: "docs-github-datasource-team"
-description: |-
- Get information on a Github team.
----
-
-# github\_team
-
-Use this data source to retrieve information about a Github team.
-
-## Example Usage
-
-```
-data "github_team" "example" {
- slug = "example"
-}
-```
-
-## Argument Reference
-
- * `slug` - (Required) The team slug.
-
-## Attributes Reference
-
- * `name` - the team's full name.
- * `description` - the team's description.
- * `privacy` - the team's privacy type.
- * `permission` - the team's permission level.
diff --git a/website/source/docs/providers/github/d/user.html.markdown b/website/source/docs/providers/github/d/user.html.markdown
deleted file mode 100644
index 48cff5866..000000000
--- a/website/source/docs/providers/github/d/user.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "github"
-page_title: "Github: github_user"
-sidebar_current: "docs-github-datasource-user"
-description: |-
- Get information on a Github user.
----
-
-# github\_user
-
-Use this data source to retrieve information about a Github user.
-
-## Example Usage
-
-```
-data "github_user" "example" {
- username = "example"
-}
-```
-
-## Argument Reference
-
- * `username` - (Required) The username.
-
-## Attributes Reference
-
- * `login` - the user's login.
- * `avatar_url` - the user's avatar URL.
- * `gravatar_id` - the user's gravatar ID.
- * `site_admin` - whether the user is a Github admin.
- * `name` - the user's full name.
- * `company` - the user's company name.
- * `blog` - the user's blog location.
- * `location` - the user's location.
- * `email` - the user's email.
- * `bio` - the user's bio.
- * `public_repos` - the number of public repositories.
- * `public_gists` - the number of public gists.
- * `followers` - the number of followers.
- * `following` - the number of following users.
- * `created_at` - the creation date.
- * `updated_at` - the update date.
-
diff --git a/website/source/docs/providers/github/index.html.markdown b/website/source/docs/providers/github/index.html.markdown
deleted file mode 100644
index f8f767e80..000000000
--- a/website/source/docs/providers/github/index.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "github"
-page_title: "Provider: GitHub"
-sidebar_current: "docs-github-index"
-description: |-
- The GitHub provider is used to interact with GitHub organization resources.
----
-
-# GitHub Provider
-
-The GitHub provider is used to interact with GitHub organization resources.
-
-The provider allows you to manage your GitHub organization's members and teams easily.
-It needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the GitHub Provider
-provider "github" {
- token = "${var.github_token}"
- organization = "${var.github_organization}"
-}
-
-# Add a user to the organization
-resource "github_membership" "membership_for_user_x" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `token` - (Optional) This is the GitHub personal access token. It must be provided, but
- it can also be sourced from the `GITHUB_TOKEN` environment variable.
-
-* `organization` - (Optional) This is the target GitHub organization to manage. The account
- corresponding to the token will need "owner" privileges for this organization. It must be provided, but
- it can also be sourced from the `GITHUB_ORGANIZATION` environment variable.
-
-* `base_url` - (Optional) This is the target GitHub base API endpoint. Providing a value is a
- requirement when working with GitHub Enterprise. It is optional to provide this value and
- it can also be sourced from the `GITHUB_BASE_URL` environment variable. The value must end with a slash.
diff --git a/website/source/docs/providers/github/r/branch_protection.html.markdown b/website/source/docs/providers/github/r/branch_protection.html.markdown
deleted file mode 100644
index 8feead2cd..000000000
--- a/website/source/docs/providers/github/r/branch_protection.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_branch_protection"
-sidebar_current: "docs-github-resource-branch-protection"
-description: |-
- Protects a GitHub branch.
----
-
-# github\_branch\_protection
-
-Protects a GitHub branch.
-
-This resource allows you to configure branch protection for repositories in your organization. When applied, the branch will be protected from forced pushes and deletion. Additional constraints, such as required status checks or restrictions on users and teams, can also be configured.
-
-## Example Usage
-
-```
-# Protect the master branch of the foo repository. Additionally, require that
-# the "ci/travis" context to be passing and only allow the engineers team merge
-# to the branch.
-resource "github_branch_protection" "foo_master" {
- repository = "foo"
- branch = "master"
-
- required_status_checks {
- include_admins = true
- strict = false
- contexts = ["ci/travis"]
- }
-
- required_pull_request_reviews {
- include_admins = true
- }
-
- restrictions {
- teams = ["engineers"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `repository` - (Required) The GitHub repository name.
-* `branch` - (Required) The Git branch to protect.
-* `required_status_checks` - (Optional) Enforce restrictions for required status checks. See [Required Status Checks](#required-status-checks) below for details.
-* `required_pull_request_reviews` - (Optional) Enforce restrictions for pull request reviews. See [Required Pull Request Reviews](#required-pull-request-reviews) below for details.
-* `restrictions` - (Optional) Enforce restrictions for the users and teams that may push to the branch. See [Restrictions](#restrictions) below for details.
-
-### Required Status Checks
-
-`required_status_checks` supports the following arguments:
-
-* `include_admins`: (Optional) Enforce required status checks for repository administrators. Defaults to `false`.
-* `strict`: (Optional) Require branches to be up to date before merging. Defaults to `false`.
-* `contexts`: (Optional) The list of status checks to require in order to merge into this branch. No status checks are required by default.
-
-### Required Pull Request Reviews
-
-`required_pull_request_reviews` supports the following arguments:
-
-* `include_admins`: (Optional) Enforce required status checks for repository administrators. Defaults to `false`.
-
-### Restrictions
-
-`restrictions` supports the following arguments:
-
-* `users`: (Optional) The list of user logins with push access.
-* `teams`: (Optional) The list of team slugs with push access.
-
-`restrictions` is only available for organization-owned repositories.
-
-## Import
-
-Github Branch Protection can be imported using an id made up of `repository:branch`, e.g.
-
-```
-$ terraform import github_branch_protection.terraform terraform:master
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/issue_label.html.markdown b/website/source/docs/providers/github/r/issue_label.html.markdown
deleted file mode 100644
index b94b82d79..000000000
--- a/website/source/docs/providers/github/r/issue_label.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_issue_label"
-sidebar_current: "docs-github-resource-issue-label"
-description: |-
- Provides a GitHub issue label resource.
----
-
-# github_issue_label
-
-Provides a GitHub issue label resource.
-
-This resource allows you to create and manage issue labels within your
-Github organization.
-
-Issue labels are keyed off of their "name", so pre-existing issue labels result
-in a 422 HTTP error if they exist outside of Terraform. Normally this would not
-be an issue, except new repositories are created with a "default" set of labels,
-and those labels easily conflict with custom ones.
-
-This resource will first check if the label exists, and then issue an update,
-otherwise it will create.
-
-## Example Usage
-
-```hcl
-# Create a new, red colored label
-resource "github_issue_label" "test_repo" {
- repository = "test-repo"
- name = "Urgent"
- color = "FF0000"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `repository` - (Required) The GitHub repository
-
-* `name` - (Required) The name of the label.
-
-* `color` - (Required) A 6 character hex code, **without the leading #**, identifying the color of the label.
-
-## Import
-
-Github Issue Labels can be imported using an id made up of `repository:name`, e.g.
-
-```
-$ terraform import github_issue_label.panic_label terraform:panic
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/membership.html.markdown b/website/source/docs/providers/github/r/membership.html.markdown
deleted file mode 100644
index 3c04b6fb9..000000000
--- a/website/source/docs/providers/github/r/membership.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_membership"
-sidebar_current: "docs-github-resource-membership"
-description: |-
- Provides a GitHub membership resource.
----
-
-# github_membership
-
-Provides a GitHub membership resource.
-
-This resource allows you to add/remove users from your organization. When applied,
-an invitation will be sent to the user to become part of the organization. When
-destroyed, either the invitation will be cancelled or the user will be removed.
-
-## Example Usage
-
-```hcl
-# Add a user to the organization
-resource "github_membership" "membership_for_some_user" {
- username = "SomeUser"
- role = "member"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `username` - (Required) The user to add to the organization.
-* `role` - (Optional) The role of the user within the organization.
- Must be one of `member` or `admin`. Defaults to `member`.
-
-
-## Import
-
-Github Membership can be imported using an id made up of `organization:username`, e.g.
-
-```
-$ terraform import github_membership.member hashicorp:someuser
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/organization_webhook.html.markdown b/website/source/docs/providers/github/r/organization_webhook.html.markdown
deleted file mode 100644
index ccd6e374e..000000000
--- a/website/source/docs/providers/github/r/organization_webhook.html.markdown
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_organization_webhook"
-sidebar_current: "docs-github-resource-organization-webhook"
-description: |-
- Creates and manages webhooks for Github organizations
----
-
-# github_organization_webhook
-
-This resource allows you to create and manage webhooks for Github organizations.
-
-## Example Usage
-
-```hcl
-resource "github_organization_webhook" "foo" {
- name = "web"
-
- configuration {
- url = "https://google.de/"
- content_type = "form"
- insecure_ssl = false
- }
-
- active = false
-
- events = ["issues"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The type of the webhook. See a list of [available hooks](https://api.github.com/hooks).
-
-* `events` - (Required) A list of events which should trigger the webhook. Defaults to `["push"]`. See a list of [available events](https://developer.github.com/v3/activity/events/types/)
-
-* `config` - (Required) key/value pair of configuration for this webhook. Available keys are `url`, `content_type`, `secret` and `insecure_ssl`.
-
-* `active` - (Optional) Indicates whether the webhook should receive events. Defaults to `true`.
-
-## Attributes Reference
-
-The following additional attributes are exported:
-
-* `url` - URL of the webhook
diff --git a/website/source/docs/providers/github/r/repository.html.markdown b/website/source/docs/providers/github/r/repository.html.markdown
deleted file mode 100644
index a2b50c828..000000000
--- a/website/source/docs/providers/github/r/repository.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_repository"
-sidebar_current: "docs-github-resource-repository"
-description: |-
- Creates and manages repositories within Github organizations
----
-
-# github_repository
-
-This resource allows you to create and manage repositories within your
-Github organization.
-
-This resource cannot currently be used to manage *personal* repositories,
-outside of organizations.
-
-## Example Usage
-
-```hcl
-resource "github_repository" "example" {
- name = "example"
- description = "My awesome codebase"
-
- private = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the repository.
-
-* `description` - (Optional) A description of the repository.
-
-* `homepage_url` - (Optional) URL of a page describing the project.
-
-* `private` - (Optional) Set to `true` to create a private repository.
- Repositories are created as public (e.g. open source) by default.
-
-* `has_issues` - (Optional) Set to `true` to enable the Github Issues features
- on the repository.
-
-* `has_wiki` - (Optional) Set to `true` to enable the Github Wiki features on
- the repository.
-
-* `has_downloads` - (Optional) Set to `true` to enable the (deprecated)
- downloads features on the repository.
-
-* `auto_init` - (Optional) Meaningful only during create; set to `true` to
- produce an initial commit in the repository.
-
-## Attributes Reference
-
-The following additional attributes are exported:
-
-* `full_name` - A string of the form "orgname/reponame".
-
-* `default_branch` - The name of the repository's default branch.
-
-* `ssh_clone_url` - URL that can be provided to `git clone` to clone the
- repository via SSH.
-
-* `http_clone_url` - URL that can be provided to `git clone` to clone the
- repository via HTTPS.
-
-* `git_clone_url` - URL that can be provided to `git clone` to clone the
- repository anonymously via the git protocol.
-
-* `svn_url` - URL that can be provided to `svn checkout` to check out
- the repository via Github's Subversion protocol emulation.
-
-
-## Import
-
-Repositories can be imported using the `name`, e.g.
-
-```
-$ terraform import github_repository.terraform terraform
-```
diff --git a/website/source/docs/providers/github/r/repository_collaborator.html.markdown b/website/source/docs/providers/github/r/repository_collaborator.html.markdown
deleted file mode 100644
index ad1c5386d..000000000
--- a/website/source/docs/providers/github/r/repository_collaborator.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_repository_collaborator"
-sidebar_current: "docs-github-resource-repository-collaborator"
-description: |-
- Provides a GitHub repository collaborator resource.
----
-
-# github_repository_collaborator
-
-Provides a GitHub repository collaborator resource.
-
-This resource allows you to add/remove collaborators from repositories in your
-organization. Collaborators can have explicit (and differing levels of) read,
-write, or administrator access to specific repositories in your organization,
-without giving the user full organization membership.
-
-When applied, an invitation will be sent to the user to become a collaborator
-on a repository. When destroyed, either the invitation will be cancelled or the
-collaborator will be removed from the repository.
-
-Further documentation on GitHub collaborators:
-
-- [Adding outside collaborators to repositories in your organization](https://help.github.com/articles/adding-outside-collaborators-to-repositories-in-your-organization/)
-- [Converting an organization member to an outside collaborator](https://help.github.com/articles/converting-an-organization-member-to-an-outside-collaborator/)
-
-## Example Usage
-
-```hcl
-# Add a collaborator to a repository
-resource "github_repository_collaborator" "a_repo_collaborator" {
- repository = "our-cool-repo"
- username = "SomeUser"
- permission = "admin"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `repository` - (Required) The GitHub repository
-* `username` - (Required) The user to add to the repository as a collaborator.
-* `permission` - (Optional) The permission of the outside collaborator for the repository.
- Must be one of `pull`, `push`, or `admin`. Defaults to `push`.
-
-
-## Import
-
-Github Repository Collaborators can be imported using an id made up of `repository:username`, e.g.
-
-```
-$ terraform import github_repository_collaborator.collaborator terraform:someuser
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/repository_webhook.html.markdown b/website/source/docs/providers/github/r/repository_webhook.html.markdown
deleted file mode 100644
index 76e84e18c..000000000
--- a/website/source/docs/providers/github/r/repository_webhook.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_repository_webhook"
-sidebar_current: "docs-github-resource-repository-webhook"
-description: |-
- Creates and manages repository webhooks within Github organizations
----
-
-# github_repository_webhook
-
-This resource allows you to create and manage webhooks for repositories within your
-Github organization.
-
-This resource cannot currently be used to manage webhooks for *personal* repositories,
-outside of organizations.
-
-## Example Usage
-
-```hcl
-resource "github_repository" "repo" {
- name = "foo"
- description = "Terraform acceptance tests"
- homepage_url = "http://example.com/"
-
- private = false
-}
-
-resource "github_repository_webhook" "foo" {
- repository = "${github_repository.repo.name}"
-
- name = "web"
-
- configuration {
- url = "https://google.de/"
- content_type = "form"
- insecure_ssl = false
- }
-
- active = false
-
- events = ["issues"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The type of the webhook. See a list of [available hooks](https://api.github.com/hooks).
-
-* `repository` - (Required) The repository of the webhook.
-
-* `events` - (Required) A list of events which should trigger the webhook. Defaults to `["push"]`. See a list of [available events](https://developer.github.com/v3/activity/events/types/)
-
-* `config` - (Required) key/value pair of configuration for this webhook. Available keys are `url`, `content_type`, `secret` and `insecure_ssl`.
-
-* `active` - (Optional) Indicates whether the webhook should receive events. Defaults to `true`.
-
-## Attributes Reference
-
-The following additional attributes are exported:
-
-* `url` - URL of the webhook
diff --git a/website/source/docs/providers/github/r/team.html.markdown b/website/source/docs/providers/github/r/team.html.markdown
deleted file mode 100644
index aa99e394e..000000000
--- a/website/source/docs/providers/github/r/team.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_team"
-sidebar_current: "docs-github-resource-team"
-description: |-
- Provides a GitHub team resource.
----
-
-# github_team
-
-Provides a GitHub team resource.
-
-This resource allows you to add/remove teams from your organization. When applied,
-a new team will be created. When destroyed, that team will be removed.
-
-## Example Usage
-
-```hcl
-# Add a team to the organization
-resource "github_team" "some_team" {
- name = "some-team"
- description = "Some cool team"
- privacy = "closed"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the team.
-* `description` - (Optional) A description of the team.
-* `privacy` - (Optional) The level of privacy for the team. Must be one of `secret` or `closed`.
- Defaults to `secret`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the created team.
-
-## Import
-
-Github Teams can be imported using the github team Id e.g.
-
-```
-$ terraform import github_team.core 1234567
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/team_membership.html.markdown b/website/source/docs/providers/github/r/team_membership.html.markdown
deleted file mode 100644
index 80bd0c4ab..000000000
--- a/website/source/docs/providers/github/r/team_membership.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_team_membership"
-sidebar_current: "docs-github-resource-team-membership"
-description: |-
- Provides a GitHub team membership resource.
----
-
-# github_team_membership
-
-Provides a GitHub team membership resource.
-
-This resource allows you to add/remove users from teams in your organization. When applied,
-the user will be added to the team. If the user hasn't accepted their invitation to the
-organization, they won't be part of the team until they do. When
-destroyed, the user will be removed from the team.
-
-## Example Usage
-
-```hcl
-# Add a user to the organization
-resource "github_membership" "membership_for_some_user" {
- username = "SomeUser"
- role = "member"
-}
-
-resource "github_team" "some_team" {
- name = "SomeTeam"
- description = "Some cool team"
-}
-
-resource "github_team_membership" "some_team_membership" {
- team_id = "${github_team.some_team.id}"
- username = "SomeUser"
- role = "member"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `team_id` - (Required) The GitHub team id
-* `username` - (Required) The user to add to the team.
-* `role` - (Optional) The role of the user within the team.
- Must be one of `member` or `maintainer`. Defaults to `member`.
-
-## Import
-
-Github Team Membership can be imported using an id made up of `teamid:username`, e.g.
-
-```
-$ terraform import github_team_membership.member 1234567:someuser
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/github/r/team_repository.html.markdown b/website/source/docs/providers/github/r/team_repository.html.markdown
deleted file mode 100644
index 999529905..000000000
--- a/website/source/docs/providers/github/r/team_repository.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "github"
-page_title: "GitHub: github_team_repository"
-sidebar_current: "docs-github-resource-team-repository"
-description: |-
- Manages the associations between teams and repositories.
----
-
-# github_team_repository
-
-This resource manages relationships between teams and repositories
-in your Github organization.
-
-Creating this resource grants a particular team permissions on a
-particular repository.
-
-The repository and the team must both belong to the same organization
-on Github. This resource does not actually *create* any repositories;
-to do that, see [`github_repository`](repository.html).
-
-## Example Usage
-
-```hcl
-# Add a repository to the team
-resource "github_team" "some_team" {
- name = "SomeTeam"
- description = "Some cool team"
-}
-
-resource "github_repository" "some_repo" {
- name = "some-repo"
-}
-
-resource "github_team_repository" "some_team_repo" {
- team_id = "${github_team.some_team.id}"
- repository = "${github_repository.some_repo.name}"
- permission = "pull"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `team_id` - (Required) The GitHub team id
-* `repository` - (Required) The repository to add to the team.
-* `permission` - (Optional) The permissions of team members regarding the repository.
- Must be one of `pull`, `push`, or `admin`. Defaults to `pull`.
-
-
-## Import
-
-Github Team Repository can be imported using an id made up of `teamid:repository`, e.g.
-
-```
-$ terraform import github_team_repository.terraform_repo 1234567:terraform
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/gitlab/index.html.markdown b/website/source/docs/providers/gitlab/index.html.markdown
deleted file mode 100644
index 543cf77a1..000000000
--- a/website/source/docs/providers/gitlab/index.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "gitlab"
-page_title: "Provider: GitLab"
-sidebar_current: "docs-gitlab-index"
-description: |-
- The GitLab provider is used to interact with GitLab group or user resources.
----
-
-# GitLab Provider
-
-The GitLab provider is used to interact with GitLab group or user resources.
-
-It needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the GitLab Provider
-provider "gitlab" {
- token = "${var.gitlab_token}"
-}
-
-# Add a project owned by the user
-resource "gitlab_project" "sample_project" {
- name = "example"
-}
-
-# Add a hook to the project
-resource "gitlab_project_hook" "sample_project_hook" {
- project = "${gitlab_project.sample_project.id}"
- url = "https://example.com/project_hook"
-}
-
-# Add a deploy key to the project
-resource "gitlab_deploy_key" "sample_deploy_key" {
- project = "${gitlab_project.sample_project.id}"
- title = "terraform example"
- key = "ssh-rsa AAAA..."
-}
-
-# Add a group
-resource "gitlab_group" "sample_group" {
- name = "example"
- path = "example"
- description = "An example group"
-}
-
-# Add a project to the group - example/example
-resource "gitlab_project" "sample_group_project" {
- name = "example"
- namespace_id = "${gitlab_group.sample_group.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `token` - (Optional) This is the GitLab personal access token. It must be provided, but
- it can also be sourced from the `GITLAB_TOKEN` environment variable.
-
-* `base_url` - (Optional) This is the target GitLab base API endpoint. Providing a value is a
- requirement when working with GitLab CE or GitLab Enterprise e.g. https://my.gitlab.server/api/v3/.
- It is optional to provide this value and it can also be sourced from the `GITLAB_BASE_URL` environment variable.
- The value must end with a slash.
diff --git a/website/source/docs/providers/gitlab/r/deploy_key.html.markdown b/website/source/docs/providers/gitlab/r/deploy_key.html.markdown
deleted file mode 100644
index 9868ced2e..000000000
--- a/website/source/docs/providers/gitlab/r/deploy_key.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "gitlab"
-page_title: "GitLab: gitlab_deploy_key"
-sidebar_current: "docs-gitlab-resource-deploy_key"
-description: |-
- Creates and manages deploy keys for GitLab projects
----
-
-# gitlab\_deploy\_key
-
-This resource allows you to create and manage deploy keys for your GitLab projects.
-
-
-## Example Usage
-
-```hcl
-resource "gitlab_deploy_key" "example" {
- project = "example/deploying"
- title = "Example deploy key"
- key = "ssh-rsa AAAA..."
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project` - (Required, string) The name or id of the project to add the deploy key to.
-
-* `title` - (Required, string) A title to describe the deploy key with.
-
-* `key` - (Required, string) The public ssh key body.
-
-* `can_push` - (Optional, boolean) Allow this deploy key to be used to push changes to the project. Defaults to `false`. **NOTE::** this cannot currently be managed.
diff --git a/website/source/docs/providers/gitlab/r/group.html.markdown b/website/source/docs/providers/gitlab/r/group.html.markdown
deleted file mode 100644
index 0fbe39d90..000000000
--- a/website/source/docs/providers/gitlab/r/group.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "gitlab"
-page_title: "GitLab: gitlab_group"
-sidebar_current: "docs-gitlab-resource-group"
-description: |-
- Creates and manages GitLab groups
----
-
-# gitlab\_group
-
-This resource allows you to create and manage GitLab groups.
-Note your provider will need to be configured with admin-level access for this resource to work.
-
-## Example Usage
-
-```hcl
-resource "gitlab_group" "example" {
- name = "example"
- path = "example"
- description = "An example group"
-}
-
-// Create a project in the example group
-resource "gitlab_project" "example" {
- name = "example"
- description = "An example project"
- namespace_id = "${gitlab_group.example.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of this group.
-
-* `path` - (Required) The path of the group, used as its URL-friendly identifier.
-
-* `description` - (Optional) The description of the group.
-
-* `lfs_enabled` - (Optional) Boolean, defaults to true. Whether to enable LFS
-support for projects in this group.
-
-* `request_access_enabled` - (Optional) Boolean, defaults to false. Whether to
-enable users to request access to the group.
-
-* `visibility_level` - (Optional) Set to `public` to create a public group.
- Valid values are `private`, `internal`, `public`.
- Groups are created as private by default.
-
-## Attributes Reference
-
-The resource exports the following attributes:
-
-* `id` - The unique id assigned to the group by the GitLab server. Serves as a
- namespace id where one is needed.
diff --git a/website/source/docs/providers/gitlab/r/project.html.markdown b/website/source/docs/providers/gitlab/r/project.html.markdown
deleted file mode 100644
index a60084a41..000000000
--- a/website/source/docs/providers/gitlab/r/project.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "gitlab"
-page_title: "GitLab: gitlab_project"
-sidebar_current: "docs-gitlab-resource-project-x"
-description: |-
- Creates and manages projects within GitLab groups or within your user
----
-
-# gitlab\_project
-
-This resource allows you to create and manage projects within your
-GitLab group or within your user.
-
-
-## Example Usage
-
-```hcl
-resource "gitlab_project" "example" {
- name = "example"
- description = "My awesome codebase"
-
- visibility_level = "public"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the project.
-
-* `namespace_id` - (Optional) The namespace (group or user) of the project. Defaults to your user.
- See [`gitlab_group`](group.html) for an example.
-
-* `description` - (Optional) A description of the project.
-
-* `default_branch` - (Optional) The default branch for the project.
-
-* `issues_enabled` - (Optional) Enable issue tracking for the project.
-
-* `merge_requests_enabled` - (Optional) Enable merge requests for the project.
-
-* `wiki_enabled` - (Optional) Enable wiki for the project.
-
-* `snippets_enabled` - (Optional) Enable snippets for the project.
-
-* `visibility_level` - (Optional) Set to `public` to create a public project.
- Valid values are `private`, `internal`, `public`.
- Repositories are created as private by default.
-
-## Attributes Reference
-
-The following additional attributes are exported:
-
-* `id` - Integer that uniquely identifies the project within the gitlab install.
-
-* `ssh_url_to_repo` - URL that can be provided to `git clone` to clone the
- repository via SSH.
-
-* `http_url_to_repo` - URL that can be provided to `git clone` to clone the
- repository via HTTP.
-
-* `web_url` - URL that can be used to find the project in a browser.
diff --git a/website/source/docs/providers/gitlab/r/project_hook.html.markdown b/website/source/docs/providers/gitlab/r/project_hook.html.markdown
deleted file mode 100644
index 20dffbdd2..000000000
--- a/website/source/docs/providers/gitlab/r/project_hook.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "gitlab"
-page_title: "GitLab: gitlab_project_hook"
-sidebar_current: "docs-gitlab-resource-project-hook"
-description: |-
- Creates and manages hooks for GitLab projects
----
-
-# gitlab\_project\_hook
-
-This resource allows you to create and manage hooks for your GitLab projects.
-For further information on hooks, consult the [gitlab
-documentation](https://docs.gitlab.com/ce/user/project/integrations/webhooks.html).
-
-
-## Example Usage
-
-```hcl
-resource "gitlab_project_hook" "example" {
- project = "example/hooked"
- url = "https://example.com/hook/example"
- merge_requests_events = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project` - (Required) The name or id of the project to add the hook to.
-
-* `url` - (Required) The url of the hook to invoke.
-
-* `token` - (Optional) A token to present when invoking the hook.
-
-* `enable_ssl_verification` - (Optional) Enable ssl verification when invoking
-the hook.
-
-* `push_events` - (Optional) Invoke the hook for push events.
-
-* `issues_events` - (Optional) Invoke the hook for issues events.
-
-* `merge_requests_events` - (Optional) Invoke the hook for merge requests.
-
-* `tag_push_events` - (Optional) Invoke the hook for tag push events.
-
-* `note_events` - (Optional) Invoke the hook for note (comment) events.
-
-* `build_events` - (Optional) Invoke the hook for build events.
-
-* `pipeline_events` - (Optional) Invoke the hook for pipeline events.
-
-* `wiki_page_events` - (Optional) Invoke the hook for wiki page events.
-
-## Attributes Reference
-
-The resource exports the following attributes:
-
-* `id` - The unique id assigned to the hook by the GitLab server.
diff --git a/website/source/docs/providers/google/d/datasource_compute_network.html.markdown b/website/source/docs/providers/google/d/datasource_compute_network.html.markdown
deleted file mode 100644
index 8e09f33c3..000000000
--- a/website/source/docs/providers/google/d/datasource_compute_network.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_network"
-sidebar_current: "docs-google-datasource-compute-network"
-description: |-
- Get a network within GCE.
----
-
-# google\_compute\_network
-
-Get a network within GCE from its name.
-
-## Example Usage
-
-```tf
-data "google_compute_network" "my-network" {
- name = "default-us-east1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the network.
-
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following attributes are exported:
-
-* `network` - The network name or resource link to the parent
- network of this network.
-
-* `description` - Description of this network.
-
-* `gateway_ipv4` - The IP address of the gateway.
-
-* `subnetworks_self_links` - the list of subnetworks which belong to the network
-
-* `self_link` - The URI of the resource.
diff --git a/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown b/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown
deleted file mode 100644
index ea53d3ef9..000000000
--- a/website/source/docs/providers/google/d/datasource_compute_subnetwork.html.markdown
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_subnetwork"
-sidebar_current: "docs-google-datasource-compute-subnetwork"
-description: |-
- Get a subnetwork within GCE.
----
-
-# google\_compute\_subnetwork
-
-Get a subnetwork within GCE from its name and region.
-
-## Example Usage
-
-```tf
-data "google_compute_subnetwork" "my-subnetwork" {
- name = "default-us-east1"
- region = "us-east1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - The name of the subnetwork.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The region this subnetwork has been created in. If
- unspecified, this defaults to the region configured in the provider.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following attributes are exported:
-
-* `network` - The network name or resource link to the parent
- network of this subnetwork.
-
-* `description` - Description of this subnetwork.
-
-* `ip_cidr_range` - The IP address range that machines in this
- network are assigned to, represented as a CIDR block.
-
-* `gateway_address` - The IP address of the gateway.
-
-* `private_ip_google_access` - Whether the VMs in this subnet
- can access Google services without assigned external IP
- addresses.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/d/google_compute_zones.html.markdown b/website/source/docs/providers/google/d/google_compute_zones.html.markdown
deleted file mode 100644
index ec441f09a..000000000
--- a/website/source/docs/providers/google/d/google_compute_zones.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_zones"
-sidebar_current: "docs-google-datasource-compute-zones"
-description: |-
- Provides a list of available Google Compute zones
----
-
-# google\_compute\_zones
-
-Provides access to available Google Compute zones in a region for a given project.
-See more about [regions and zones](https://cloud.google.com/compute/docs/regions-zones/regions-zones) in the upstream docs.
-
-```
-data "google_compute_zones" "available" {}
-
-resource "google_compute_instance_group_manager" "foo" {
- count = "${length(data.google_compute_zones.available.names)}"
-
- name = "terraform-test-${count.index}"
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- base_instance_name = "foobar-${count.index}"
- zone = "${data.google_compute_zones.available.names[count.index]}"
- target_size = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` (Optional) - Region from which to list available zones. Defaults to region declared in the provider.
-* `status` (Optional) - Allows to filter list of zones based on their current status. Status can be either `UP` or `DOWN`.
- Defaults to no filtering (all available zones - both `UP` and `DOWN`).
-
-## Attributes Reference
-
-The following attribute is exported:
-
-* `names` - A list of zones available in the given region
diff --git a/website/source/docs/providers/google/d/google_container_engine_versions.html.markdown b/website/source/docs/providers/google/d/google_container_engine_versions.html.markdown
deleted file mode 100644
index 855e9bc9e..000000000
--- a/website/source/docs/providers/google/d/google_container_engine_versions.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_container_engine_versions"
-sidebar_current: "docs-google-datasource-container-versions"
-description: |-
- Provides lists of available Google Container Engine versions for masters and nodes.
----
-
-# google\_container\_engine\_versions
-
-Provides access to available Google Container Engine versions in a zone for a given project.
-
-```hcl
-data "google_container_engine_versions" "central1b" {
- zone = "us-central1-b"
-}
-
-resource "google_container_cluster" "foo" {
- name = "terraform-test-cluster"
- zone = "us-central1-b"
- node_version = "${data.google_container_engine_versions.central1b.latest_node_version}"
- initial_node_count = 1
-
- master_auth {
- username = "mr.yoda"
- password = "adoy.rm"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` (required) - Zone to list available cluster versions for. Should match the zone the cluster will be deployed in.
-* `project` (optional) - ID of the project to list available cluster versions for. Should match the project the cluster will be deployed to.
- Defaults to the project that the provider is authenticated with.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `valid_master_versions` - A list of versions available in the given zone for use with master instances.
-* `valid_node_versions` - A list of versions available in the given zone for use with node instances.
-* `latest_master_version` - The latest version available in the given zone for use with master instances.
-* `latest_node_version` - The latest version available in the given zone for use with node instances.
diff --git a/website/source/docs/providers/google/d/google_iam_policy.html.markdown b/website/source/docs/providers/google/d/google_iam_policy.html.markdown
deleted file mode 100644
index fe0c4dcab..000000000
--- a/website/source/docs/providers/google/d/google_iam_policy.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_iam_policy"
-sidebar_current: "docs-google-datasource-iam-policy"
-description: |-
- Generates an IAM policy that can be referenced by other resources, applying
- the policy to them.
----
-
-# google\_iam\_policy
-
-Generates an IAM policy document that may be referenced by and applied to
-other Google Cloud Platform resources, such as the `google_project` resource.
-
-```
-data "google_iam_policy" "admin" {
- binding {
- role = "roles/compute.instanceAdmin"
-
- members = [
- "serviceAccount:your-custom-sa@your-project.iam.gserviceaccount.com",
- ]
- }
-
- binding {
- role = "roles/storage.objectViewer"
-
- members = [
- "user:evanbrown@google.com",
- ]
- }
-}
-```
-
-This data source is used to define IAM policies to apply to other resources.
-Currently, defining a policy through a datasource and referencing that policy
-from another resource is the only way to apply an IAM policy to a resource.
-
-**Note:** Several restrictions apply when setting IAM policies through this API.
-See the [setIamPolicy docs](https://cloud.google.com/resource-manager/reference/rest/v1/projects/setIamPolicy)
-for a list of these restrictions.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `binding` (Required) - A nested configuration block (described below)
- defining a binding to be included in the policy document. Multiple
- `binding` arguments are supported.
-
-Each document configuration must have one or more `binding` blocks, which
-each accept the following arguments:
-
-* `role` (Required) - The role/permission that will be granted to the members.
- See the [IAM Roles](https://cloud.google.com/compute/docs/access/iam) documentation for a complete list of roles.
-* `members` (Required) - An array of users/principals that will be granted
- the privilege in the `role`. For a human user, prefix the user's e-mail
- address with `user:` (e.g., `user:evandbrown@gmail.com`). For a service
- account, prefix the service account e-mail address with `serviceAccount:`
- (e.g., `serviceAccount:your-service-account@your-project.iam.gserviceaccount.com`).
-
-## Attributes Reference
-
-The following attribute is exported:
-
-* `policy_data` - The above bindings serialized in a format suitable for
- referencing from a resource that supports IAM.
diff --git a/website/source/docs/providers/google/d/signed_url.html.markdown b/website/source/docs/providers/google/d/signed_url.html.markdown
deleted file mode 100644
index afb372b49..000000000
--- a/website/source/docs/providers/google/d/signed_url.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_storage_object_signed_url"
-sidebar_current: "docs-google-datasource-signed_url"
-description: |-
- Provides signed URL to Google Cloud Storage object.
----
-
-# google\_storage\_object\_signed_url
-
-The Google Cloud storage signed URL data source generates a signed URL for a given storage object. Signed URLs provide a way to give time-limited read or write access to anyone in possession of the URL, regardless of whether they have a Google account.
-
-For more info about signed URL's is available [here](https://cloud.google.com/storage/docs/access-control/signed-urls).
-
-## Example Usage
-
-```hcl
-data "google_storage_object_signed_url" "artifact" {
- bucket = "install_binaries"
- path = "path/to/install_file.bin"
-
-}
-
-resource "google_compute_instance" "vm" {
- name = "vm"
- ...
-
- provisioner "remote-exec" {
- inline = [
- "wget '${data.google_storage_object_signed_url.artifact.signed_url}' -O install_file.bin",
- "chmod +x install_file.bin",
- "./install_file.bin"
- ]
- }
-}
-```
-
-## Full Example
-
-```hcl
-data "google_storage_object_signed_url" "get_url" {
- bucket = "fried_chicken"
- path = "path/to/file"
- content_md5 = "pRviqwS4c4OTJRTe03FD1w=="
- content_type = "text/plain"
- duration = "2d"
- credentials = "${file("path/to/credentials.json")}"
-
- extension_headers {
- x-goog-if-generation-match = 1
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `bucket` - (Required) The name of the bucket to read the object from
-* `path` - (Required) The full path to the object inside the bucket
-* `http_method` - (Optional) What HTTP Method will the signed URL allow (defaults to `GET`)
-* `duration` - (Optional) For how long shall the signed URL be valid (defaults to 1 hour - i.e. `1h`).
- See [here](https://golang.org/pkg/time/#ParseDuration) for info on valid duration formats.
-* `credentials` - (Optional) What Google service account credentials json should be used to sign the URL.
- This data source checks the following locations for credentials, in order of preference: data source `credentials` attribute, provider `credentials` attribute and finally the GOOGLE_APPLICATION_CREDENTIALS environment variable.
-
-> **NOTE** the default google credentials configured by `gcloud` sdk or the service account associated with a compute instance cannot be used, because these do not include the private key required to sign the URL. A valid `json` service account credentials key file must be used, as generated via Google cloud console.
-
-* `content_type` - (Optional) If you specify this in the datasource, the client must provide the `Content-Type` HTTP header with the same value in its request.
-* `content_md5` - (Optional) The [MD5 digest](https://cloud.google.com/storage/docs/hashes-etags#_MD5) value in Base64.
- Typically retrieved from `google_storage_bucket_object.object.md5hash` attribute.
- If you provide this in the datasource, the client (e.g. browser, curl) must provide the `Content-MD5` HTTP header with this same value in its request.
-* `extension_headers` - (Optional) As needed. The server checks to make sure that the client provides matching values in requests using the signed URL.
- Any header starting with `x-goog-` is accepted but see the [Google Docs](https://cloud.google.com/storage/docs/xml-api/reference-headers) for list of headers that are supported by Google.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `signed_url` - The signed URL that can be used to access the storage object without authentication.
diff --git a/website/source/docs/providers/google/index.html.markdown b/website/source/docs/providers/google/index.html.markdown
deleted file mode 100644
index a04f183d4..000000000
--- a/website/source/docs/providers/google/index.html.markdown
+++ /dev/null
@@ -1,87 +0,0 @@
----
-layout: "google"
-page_title: "Provider: Google Cloud"
-sidebar_current: "docs-google-index"
-description: |-
- The Google Cloud provider is used to interact with Google Cloud services. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Google Cloud Provider
-
-The Google Cloud provider is used to interact with
-[Google Cloud services](https://cloud.google.com/). The provider needs
-to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-// Configure the Google Cloud provider
-provider "google" {
- credentials = "${file("account.json")}"
- project = "my-gce-project"
- region = "us-central1"
-}
-
-// Create a new instance
-resource "google_compute_instance" "default" {
- # ...
-}
-```
-
-## Configuration Reference
-
-The following keys can be used to configure the provider.
-
-* `credentials` - (Optional) Contents of the JSON file used to describe your
- account credentials, downloaded from Google Cloud Console. More details on
- retrieving this file are below. Credentials may be blank if you are running
- Terraform from a GCE instance with a properly-configured [Compute Engine
- Service Account](https://cloud.google.com/compute/docs/authentication). This
- can also be specified using any of the following environment variables
- (listed in order of precedence):
-
- * `GOOGLE_CREDENTIALS`
- * `GOOGLE_CLOUD_KEYFILE_JSON`
- * `GCLOUD_KEYFILE_JSON`
-
- The [`GOOGLE_APPLICATION_CREDENTIALS`](https://developers.google.com/identity/protocols/application-default-credentials#howtheywork)
- environment variable can also contain the path of a file to obtain credentials
- from.
-
-* `project` - (Required) The ID of the project to apply any resources to. This
- can be specified using any of the following environment variables (listed in
- order of precedence):
-
- * `GOOGLE_PROJECT`
- * `GCLOUD_PROJECT`
- * `CLOUDSDK_CORE_PROJECT`
-
-* `region` - (Required) The region to operate under. This can also be specified
- using any of the following environment variables (listed in order of
- precedence):
-
- * `GOOGLE_REGION`
- * `GCLOUD_REGION`
- * `CLOUDSDK_COMPUTE_REGION`
-
-## Authentication JSON File
-
-Authenticating with Google Cloud services requires a JSON
-file which we call the _account file_.
-
-This file is downloaded directly from the
-[Google Developers Console](https://console.developers.google.com). To make
-the process more straightforwarded, it is documented here:
-
-1. Log into the [Google Developers Console](https://console.developers.google.com)
- and select a project.
-
-2. The API Manager view should be selected, click on "Credentials" on the left,
- then "Create credentials", and finally "Service account key".
-
-3. Select "Compute Engine default service account" in the "Service account"
- dropdown, and select "JSON" as the key type.
-
-4. Clicking "Create" will download your `credentials`.
diff --git a/website/source/docs/providers/google/r/bigquery_dataset.html.markdown b/website/source/docs/providers/google/r/bigquery_dataset.html.markdown
deleted file mode 100644
index 5edba255d..000000000
--- a/website/source/docs/providers/google/r/bigquery_dataset.html.markdown
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_bigquery_dataset"
-sidebar_current: "docs-google-bigquery-dataset"
-description: |-
- Creates a dataset resource for Google BigQuery.
----
-
-# google_bigquery_dataset
-
-Creates a dataset resource for Google BigQuery. For more information see
-[the official documentation](https://cloud.google.com/bigquery/docs/) and
-[API](https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets).
-
-
-## Example Usage
-
-```hcl
-resource "google_bigquery_dataset" "default" {
- dataset_id = "test"
- friendly_name = "test"
- description = "This is a test description"
- location = "EU"
- default_table_expiration_ms = 3600000
-
- labels {
- env = "default"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `dataset_id` - (Required) A unique ID for the resource.
- Changing this forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `friendly_name` - (Optional) A descriptive name for the dataset.
-
-* `description` - (Optional) A user-friendly description of the dataset.
-
-* `location` - (Optional) The geographic location where the dataset should reside.
-
- Possible values include `EU` and `US`. The default value is `US`.
-
- Changing this forces a new resource to be created.
-
-* `default_table_expiration_ms` - (Optional) The default lifetime of all
- tables in the dataset, in milliseconds. The minimum value is 3600000
- milliseconds (one hour).
-
- Once this property is set, all newly-created
- tables in the dataset will have an expirationTime property set to the
- creation time plus the value in this property, and changing the value
- will only affect new tables, not existing ones. When the
- expirationTime for a given table is reached, that table will be
- deleted automatically. If a table's expirationTime is modified or
- removed before the table expires, or if you provide an explicit
- expirationTime when creating a table, that value takes precedence
- over the default expiration time indicated by this property.
-
- * `labels` - (Optional) A mapping of labels to assign to the resource.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
-
-* `etag` - A hash of the resource.
-
-* `creation_time` - The time when this dataset was created, in milliseconds since the epoch.
-
-* `last_modified_time` - The date when this dataset or any of its tables was last modified,
- in milliseconds since the epoch.
diff --git a/website/source/docs/providers/google/r/bigquery_table.html.markdown b/website/source/docs/providers/google/r/bigquery_table.html.markdown
deleted file mode 100644
index 165df1aa6..000000000
--- a/website/source/docs/providers/google/r/bigquery_table.html.markdown
+++ /dev/null
@@ -1,113 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_bigquery_table"
-sidebar_current: "docs-google-bigquery-table"
-description: |-
- Creates a table resource in a dataset for Google BigQuery.
----
-
-# google_bigquery_table
-
-Creates a table resource in a dataset for Google BigQuery. For more information see
-[the official documentation](https://cloud.google.com/bigquery/docs/) and
-[API](https://cloud.google.com/bigquery/docs/reference/rest/v2/tables).
-
-
-## Example Usage
-
-```hcl
-resource "google_bigquery_dataset" "default" {
- dataset_id = "test"
- friendly_name = "test"
- description = "This is a test description"
- location = "EU"
- default_table_expiration_ms = 3600000
-
- labels {
- env = "default"
- }
-}
-
-resource "google_bigquery_table" "default" {
- dataset_id = "${google_bigquery_dataset.default.id}"
- table_id = "test"
-
- time_partitioning {
- type = "DAY"
- }
-
- labels {
- env = "default"
- }
-
- schema = "${file("schema.json")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `dataset_id` - (Required) The dataset ID to create the table in.
- Changing this forces a new resource to be created.
-
-* `table_id` - (Required) A unique ID for the resource.
- Changing this forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `description` - (Optional) The field description.
-
-* `expiration_time` - (Optional) The time when this table expires, in
- milliseconds since the epoch. If not present, the table will persist
- indefinitely. Expired tables will be deleted and their storage
- reclaimed.
-
-* `friendly_name` - (Optional) A descriptive name for the table.
-
-* `labels` - (Optional) A mapping of labels to assign to the resource.
-
-* `schema` - (Optional) A JSON schema for the table.
-
-* `time_partitioning` - (Optional) If specified, configures time-based
- partitioning for this table. Structure is documented below.
-
-The `time_partitioning` block supports:
-
-* `expiration_ms` - (Optional) Number of milliseconds for which to keep the
- storage for a partition.
-
-* `type` - (Required) The only type supported is DAY, which will generate
- one partition per day based on data loading time.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `creation_time` - The time when this table was created, in milliseconds since the epoch.
-
-* `etag` - A hash of the resource.
-
-* `last_modified_time` - The time when this table was last modified, in milliseconds since the epoch.
-
-* `location` - The geographic location where the table resides. This value is inherited from the dataset.
-
-* `num_bytes` - The size of this table in bytes, excluding any data in the streaming buffer.
-
-* `num_long_term_bytes` - The number of bytes in the table that are considered "long-term storage".
-
-* `num_rows` - The number of rows of data in this table, excluding any data in the streaming buffer.
-
-* `self_link` - The URI of the created resource.
-
-* `type` - Describes the table type.
-
-## Import
-
-Tables can be imported using ID of the table (`projectID`:`datasetID`.`tableID`), e.g.
-
-```
-$ terraform import bigquery_table.default testproject:testdataset.testtable
-```
diff --git a/website/source/docs/providers/google/r/compute_address.html.markdown b/website/source/docs/providers/google/r/compute_address.html.markdown
deleted file mode 100644
index 8fb5427f9..000000000
--- a/website/source/docs/providers/google/r/compute_address.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_address"
-sidebar_current: "docs-google-compute-address"
-description: |-
- Creates a static IP address resource for Google Compute Engine.
----
-
-# google\_compute\_address
-
-Creates a static IP address resource for Google Compute Engine. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
-[API](https://cloud.google.com/compute/docs/reference/latest/addresses).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_address" "default" {
- name = "test-address"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The Region in which the created address should reside.
- If it is not provided, the provider region is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
-* `address` - The IP of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_autoscaler.html.markdown b/website/source/docs/providers/google/r/compute_autoscaler.html.markdown
deleted file mode 100644
index c963e537f..000000000
--- a/website/source/docs/providers/google/r/compute_autoscaler.html.markdown
+++ /dev/null
@@ -1,147 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_autoscaler"
-sidebar_current: "docs-google-compute-autoscaler"
-description: |-
- Manages an Autoscaler within GCE.
----
-
-# google\_compute\_autoscaler
-
-A Compute Engine Autoscaler automatically adds or removes virtual machines from
-a managed instance group based on increases or decreases in load. This allows
-your applications to gracefully handle increases in traffic and reduces cost
-when the need for resources is lower. You just define the autoscaling policy and
-the autoscaler performs automatic scaling based on the measured load. For more
-information, see [the official
-documentation](https://cloud.google.com/compute/docs/autoscaler/) and
-[API](https://cloud.google.com/compute/docs/autoscaler/v1beta2/autoscalers)
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_instance_template" "foobar" {
- name = "foobar"
- machine_type = "n1-standard-1"
- can_ip_forward = false
-
- tags = ["foo", "bar"]
-
- disk {
- source_image = "debian-cloud/debian-8"
- }
-
- network_interface {
- network = "default"
- }
-
- metadata {
- foo = "bar"
- }
-
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
-}
-
-resource "google_compute_target_pool" "foobar" {
- name = "foobar"
-}
-
-resource "google_compute_instance_group_manager" "foobar" {
- name = "foobar"
- zone = "us-central1-f"
-
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- target_pools = ["${google_compute_target_pool.foobar.self_link}"]
- base_instance_name = "foobar"
-}
-
-resource "google_compute_autoscaler" "foobar" {
- name = "foobar"
- zone = "us-central1-f"
- target = "${google_compute_instance_group_manager.foobar.self_link}"
-
- autoscaling_policy = {
- max_replicas = 5
- min_replicas = 1
- cooldown_period = 60
-
- cpu_utilization {
- target = 0.5
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the autoscaler.
-
-* `target` - (Required) The full URL to the instance group manager whose size we
- control.
-
-* `zone` - (Required) The zone of the target.
-
-* `autoscaling_policy.` - (Required) The parameters of the autoscaling
- algorithm. Structure is documented below.
-
-- - -
-
-* `description` - (Optional) An optional textual description of the instance
- group manager.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-The `autoscaling_policy` block contains:
-
-* `max_replicas` - (Required) The group will never be larger than this.
-
-* `min_replicas` - (Required) The group will never be smaller than this.
-
-* `cooldown_period` - (Optional) Period to wait between changes. This should be
- at least double the time your instances take to start up.
-
-* `cpu_utilization` - (Optional) A policy that scales when the cluster's average
- CPU is above or below a given threshold. Structure is documented below.
-
-* `metric` - (Optional) A policy that scales according to Google Cloud
- Monitoring metrics Structure is documented below.
-
-* `load_balancing_utilization` - (Optional) A policy that scales when the load
- reaches a proportion of a limit defined in the HTTP load balancer. Structure
-is documented below.
-
-The `cpu_utilization` block contains:
-
-* `target` - The floating point threshold where CPU utilization should be. E.g.
- for 50% one would specify 0.5.
-
-The `metric` block contains (more documentation
-[here](https://cloud.google.com/monitoring/api/metrics)):
-
-* `name` - The name of the Google Cloud Monitoring metric to follow, e.g.
- `compute.googleapis.com/instance/network/received_bytes_count`
-
-* `type` - Either "cumulative", "delta", or "gauge".
-
-* `target` - The desired metric value per instance. Must be a positive value.
-
-The `load_balancing_utilization` block contains:
-
-* `target` - The floating point threshold where load balancing utilization
- should be. E.g. if the load balancer's `maxRatePerInstance` is 10 requests
- per second (RPS) then setting this to 0.5 would cause the group to be scaled
- such that each instance receives 5 RPS.
-
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URL of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown b/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown
deleted file mode 100644
index 79f1de970..000000000
--- a/website/source/docs/providers/google/r/compute_backend_bucket.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_backend_bucket"
-sidebar_current: "docs-google-compute-backend-bucket"
-description: |-
- Creates a Backend Bucket resource for Google Compute Engine.
----
-
-# google\_compute\_backend\_bucket
-
-A Backend Bucket defines a Google Cloud Storage bucket that will serve traffic through Google Cloud
-Load Balancer.
-
-## Example Usage
-
-```hcl
-resource "google_compute_backend_bucket" "foobar" {
- name = "image-backend-bucket"
- description = "Contains beautiful images"
- bucket_name = "${google_storage_bucket.image_bucket.name}"
- enable_cdn = true
-}
-
-resource "google_storage_bucket" "image_bucket" {
- name = "image-store-bucket"
- location = "EU"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the backend bucket.
-
-* `bucket_name` - (Required) The name of the Google Cloud Storage bucket to be used as a backend
- bucket.
-
-- - -
-
-* `description` - (Optional) The textual description for the backend bucket.
-
-* `enable_cdn` - (Optional) Whether or not to enable the Cloud CDN on the backend bucket.
-
-* `project` - (Optional) The project in which the resource belongs. If it is not provided, the
- provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_backend_service.html.markdown b/website/source/docs/providers/google/r/compute_backend_service.html.markdown
deleted file mode 100644
index 6143ae956..000000000
--- a/website/source/docs/providers/google/r/compute_backend_service.html.markdown
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_backend_service"
-sidebar_current: "docs-google-compute-backend-service"
-description: |-
- Creates a Backend Service resource for Google Compute Engine.
----
-
-# google\_compute\_backend\_service
-
-A Backend Service defines a group of virtual machines that will serve traffic for load balancing. For more information
-see [the official documentation](https://cloud.google.com/compute/docs/load-balancing/http/backend-service)
-and the [API](https://cloud.google.com/compute/docs/reference/latest/backendServices).
-
-For internal load balancing, use a [google_compute_region_backend_service](/docs/providers/google/r/compute_region_backend_service.html).
-
-## Example Usage
-
-```hcl
-resource "google_compute_backend_service" "foobar" {
- name = "blablah"
- description = "Hello World 1234"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
- enable_cdn = false
-
- backend {
- group = "${google_compute_instance_group_manager.foo.instance_group}"
- }
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_instance_group_manager" "foo" {
- name = "terraform-test"
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- base_instance_name = "foobar"
- zone = "us-central1-f"
- target_size = 1
-}
-
-resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
- machine_type = "n1-standard-1"
-
- network_interface {
- network = "default"
- }
-
- disk {
- source_image = "debian-cloud/debian-8"
- auto_delete = true
- boot = true
- }
-}
-
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the backend service.
-
-* `health_checks` - (Required) Specifies a list of HTTP health check objects
- for checking the health of the backend service.
-
-- - -
-
-* `backend` - (Optional) The list of backends that serve this BackendService. Structure is documented below.
-
-* `description` - (Optional) The textual description for the backend service.
-
-* `enable_cdn` - (Optional) Whether or not to enable the Cloud CDN on the backend service.
-
-* `port_name` - (Optional) The name of a service that has been added to an
- instance group in this backend. See [related docs](https://cloud.google.com/compute/docs/instance-groups/#specifying_service_endpoints) for details. Defaults to http.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `protocol` - (Optional) The protocol for incoming requests. Defaults to
- `HTTP`.
-
-* `session_affinity` - (Optional) How to distribute load. Options are `NONE` (no
- affinity), `CLIENT_IP` (hash of the source/dest addresses / ports), and
- `GENERATED_COOKIE` (distribute load using a generated session cookie).
-
-* `timeout_sec` - (Optional) The number of secs to wait for a backend to respond
- to a request before considering the request failed. Defaults to `30`.
-
-* `connection_draining_timeout_sec` - (Optional) Time for which instance will be drained (not accept new connections,
-but still work to finish started ones). Defaults to `0`.
-
-The `backend` block supports:
-
-* `group` - (Required) The name or URI of a Compute Engine instance group
- (`google_compute_instance_group_manager.xyz.instance_group`) that can
- receive traffic.
-
-* `balancing_mode` - (Optional) Defines the strategy for balancing load.
- Defaults to `UTILIZATION`
-
-* `capacity_scaler` - (Optional) A float in the range [0, 1.0] that scales the
- maximum parameters for the group (e.g., max rate). A value of 0.0 will cause
- no requests to be sent to the group (i.e., it adds the group in a drained
- state). The default is 1.0.
-
-* `description` - (Optional) Textual description for the backend.
-
-* `max_rate` - (Optional) Maximum requests per second (RPS) that the group can
- handle.
-
-* `max_rate_per_instance` - (Optional) The maximum per-instance requests per
- second (RPS).
-
-* `max_utilization` - (Optional) The target CPU utilization for the group as a
- float in the range [0.0, 1.0]. This flag can only be provided when the
- balancing mode is `UTILIZATION`. Defaults to `0.8`.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `fingerprint` - The fingerprint of the backend service.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_disk.html.markdown b/website/source/docs/providers/google/r/compute_disk.html.markdown
deleted file mode 100644
index ef723cb1b..000000000
--- a/website/source/docs/providers/google/r/compute_disk.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_disk"
-sidebar_current: "docs-google-compute-disk"
-description: |-
- Creates a new persistent disk within GCE, based on another disk.
----
-
-# google\_compute\_disk
-
-Creates a new persistent disk within GCE, based on another disk.
-
-~> **Note:** All arguments including the disk encryption key will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "google_compute_disk" "default" {
- name = "test-disk"
- type = "pd-ssd"
- zone = "us-central1-a"
- image = "debian-cloud/debian-8"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `zone` - (Required) The zone where this disk will be available.
-
-- - -
-
-* `disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
- encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- to encrypt this disk.
-
-* `image` - (Optional) The image from which to initialize this disk. This can be
- one of: the image's `self_link`, `projects/{project}/global/images/{image}`,
- `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
- `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
- `{project}/{image}`, `{family}`, or `{image}`.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `size` - (Optional) The size of the image in gigabytes. If not specified, it
- will inherit the size of its base image.
-
-* `snapshot` - (Optional) Name of snapshot from which to initialize this disk.
-
-* `type` - (Optional) The GCE disk type.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `disk_encryption_key_sha256` - The [RFC 4648 base64]
- (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the
- [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption)
- that protects this resource.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_firewall.html.markdown b/website/source/docs/providers/google/r/compute_firewall.html.markdown
deleted file mode 100644
index bc1f93c28..000000000
--- a/website/source/docs/providers/google/r/compute_firewall.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_firewall"
-sidebar_current: "docs-google-compute-firewall"
-description: |-
- Manages a firewall resource within GCE.
----
-
-# google\_compute\_firewall
-
-Manages a firewall resource within GCE.
-
-## Example Usage
-
-```hcl
-resource "google_compute_firewall" "default" {
- name = "test"
- network = "${google_compute_network.other.name}"
-
- allow {
- protocol = "icmp"
- }
-
- allow {
- protocol = "tcp"
- ports = ["80", "8080", "1000-2000"]
- }
-
- source_tags = ["web"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `network` - (Required) The name of the network to attach this firewall to.
-
-* `allow` - (Required) Can be specified multiple times for each allow
- rule. Each allow block supports fields documented below.
-
-- - -
-
-* `description` - (Optional) Textual description field.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `source_ranges` - (Optional) A list of source CIDR ranges that this
- firewall applies to.
-
-* `source_tags` - (Optional) A list of source tags for this firewall.
-
-* `target_tags` - (Optional) A list of target tags for this firewall.
-
-The `allow` block supports:
-
-* `protocol` - (Required) The name of the protocol to allow.
-
-* `ports` - (Optional) List of ports and/or port ranges to allow. This can
- only be specified if the protocol is TCP or UDP.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown
deleted file mode 100644
index a8c735187..000000000
--- a/website/source/docs/providers/google/r/compute_forwarding_rule.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_forwarding_rule"
-sidebar_current: "docs-google-compute-forwarding-rule"
-description: |-
- Manages a Forwarding Rule within GCE.
----
-
-# google\_compute\_forwarding\_rule
-
-Manages a Forwarding Rule within GCE. This binds an ip and port range to a target pool. For more
-information see [the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/network/forwarding-rules) and
-[API](https://cloud.google.com/compute/docs/reference/latest/forwardingRules).
-
-## Example Usage
-
-```hcl
-resource "google_compute_forwarding_rule" "default" {
- name = "test"
- target = "${google_compute_target_pool.default.self_link}"
- port_range = "80"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-- - -
-
-* `backend_service` - (Optional) BackendService resource to receive the
- matched traffic. Only used for internal load balancing.
-
-* `description` - (Optional) Textual description field.
-
-* `ip_address` - (Optional) The static IP. (if not set, an ephemeral IP is
- used).
-
-* `ip_protocol` - (Optional) The IP protocol to route, one of "TCP" "UDP" "AH"
- "ESP" or "SCTP" for external load balancing, "TCP" or "UDP" for internal
- (default "TCP").
-
-* `load_balancing_scheme` - (Optional) Type of load balancing to use. Can be
- set to "INTERNAL" or "EXTERNAL" (default "EXTERNAL").
-
-* `network` - (Optional) Network that the load balanced IP should belong to.
- Only used for internal load balancing. If it is not provided, the default
- network is used.
-
-* `port_range` - (Optional) A range e.g. "1024-2048" or a single port "1024"
- (defaults to all ports!). Only used for external load balancing.
-
-* `ports` - (Optional) A list of ports (maximum of 5) to use for internal load
- balancing. Packets addressed to these ports will be forwarded to the backends
- configured with this forwarding rule. Required for internal load balancing.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The Region in which the created address should reside.
- If it is not provided, the provider region is used.
-
-* `subnetwork` - (Optional) Subnetwork that the load balanced IP should belong
- to. Only used for internal load balancing. Must be specified if the network
- is in custom subnet mode.
-
-* `target` - (Optional) URL of target pool. Required for external load
- balancing.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_global_address.html.markdown b/website/source/docs/providers/google/r/compute_global_address.html.markdown
deleted file mode 100644
index 3b7f4de2b..000000000
--- a/website/source/docs/providers/google/r/compute_global_address.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_global_address"
-sidebar_current: "docs-google-compute-global-address"
-description: |-
- Creates a static global IP address resource for a Google Compute Engine project.
----
-
-# google\_compute\_global\_address
-
-Creates a static IP address resource global to a Google Compute Engine project. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instances-and-network) and
-[API](https://cloud.google.com/compute/docs/reference/latest/globalAddresses).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_global_address" "default" {
- name = "test-address"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
-is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `address` - The assigned address.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown b/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown
deleted file mode 100644
index bc5607da0..000000000
--- a/website/source/docs/providers/google/r/compute_global_forwarding_rule.html.markdown
+++ /dev/null
@@ -1,101 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_global_forwarding_rule"
-sidebar_current: "docs-google-compute-global-forwarding-rule"
-description: |-
- Manages a Global Forwarding Rule within GCE.
----
-
-# google\_compute\_global\_forwarding\_rule
-
-Manages a Global Forwarding Rule within GCE. This binds an ip and port to a target HTTP(s) proxy. For more
-information see [the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/http/global-forwarding-rules) and
-[API](https://cloud.google.com/compute/docs/reference/latest/globalForwardingRules).
-
-## Example Usage
-
-```hcl
-resource "google_compute_global_forwarding_rule" "default" {
- name = "test"
- target = "${google_compute_target_http_proxy.default.self_link}"
- port_range = "80"
-}
-
-resource "google_compute_target_http_proxy" "default" {
- name = "test-proxy"
- description = "a description"
- url_map = "${google_compute_url_map.default.self_link}"
-}
-
-resource "google_compute_url_map" "default" {
- name = "url-map"
- description = "a description"
- default_service = "${google_compute_backend_service.default.self_link}"
-
- host_rule {
- hosts = ["mysite.com"]
- path_matcher = "allpaths"
- }
-
- path_matcher {
- name = "allpaths"
- default_service = "${google_compute_backend_service.default.self_link}"
-
- path_rule {
- paths = ["/*"]
- service = "${google_compute_backend_service.default.self_link}"
- }
- }
-}
-
-resource "google_compute_backend_service" "default" {
- name = "default-backend"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-* `target` - (Required) URL of target HTTP or HTTPS proxy.
-
-- - -
-
-* `description` - (Optional) Textual description field.
-
-* `ip_address` - (Optional) The static IP. (if not set, an ephemeral IP is
- used). This should be the literal IP address to be used, not the `self_link`
- to a `google_compute_address` resource. (If using a `google_compute_address`
- resource, use the `address` property instead of the `self_link` property.)
-
-* `ip_protocol` - (Optional) The IP protocol to route, one of "TCP" "UDP" "AH"
- "ESP" or "SCTP". (default "TCP").
-
-* `port_range` - (Optional) A range e.g. "1024-2048" or a single port "1024"
- (defaults to all ports!).
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_health_check.html.markdown b/website/source/docs/providers/google/r/compute_health_check.html.markdown
deleted file mode 100644
index f9690144c..000000000
--- a/website/source/docs/providers/google/r/compute_health_check.html.markdown
+++ /dev/null
@@ -1,125 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_health_check"
-sidebar_current: "docs-google-compute-health-check"
-description: |-
- Manages a Health Check within GCE.
----
-
-# google\_compute\_health\_check
-
-Manages a health check within GCE. This is used to monitor instances
-behind load balancers. Timeouts or HTTP errors cause the instance to be
-removed from the pool. For more information, see [the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/healthChecks).
-
-## Example Usage
-
-```hcl
-resource "google_compute_health_check" "default" {
- name = "test"
-
- timeout_sec = 1
- check_interval_sec = 1
-
- tcp_health_check {
- port = "80"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `check_interval_sec` - (Optional) The number of seconds between each poll of
- the instance (default 5).
-
-* `description` - (Optional) Textual description field.
-
-* `healthy_threshold` - (Optional) Consecutive successes required (default 2).
-
-* `http_health_check` - (Optional) An HTTP Health Check.
- See *HTTP Health Check* below.
-
-* `https_health_check` - (Optional) An HTTPS Health Check.
- See *HTTPS Health Check* below.
-
-* `ssl_health_check` - (Optional) An SSL Health Check.
- See *SSL Health Check* below.
-
-* `tcp_health_check` - (Optional) A TCP Health Check.
- See *TCP Health Check* below.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `timeout_sec` - (Optional) The number of seconds to wait before declaring
- failure (default 5).
-
-* `unhealthy_threshold` - (Optional) Consecutive failures required (default 2).
-
-
-**HTTP Health Check** supports the following attributes:
-
-* `host` - (Optional) HTTP host header field (default instance's public ip).
-
-* `port` - (Optional) TCP port to connect to (default 80).
-
-* `proxy_header` - (Optional) Type of proxy header to append before sending
- data to the backend, either NONE or PROXY_V1 (default NONE).
-
-* `request_path` - (Optional) URL path to query (default /).
-
-
-**HTTPS Health Check** supports the following attributes:
-
-* `host` - (Optional) HTTPS host header field (default instance's public ip).
-
-* `port` - (Optional) TCP port to connect to (default 443).
-
-* `proxy_header` - (Optional) Type of proxy header to append before sending
- data to the backend, either NONE or PROXY_V1 (default NONE).
-
-* `request_path` - (Optional) URL path to query (default /).
-
-
-**SSL Health Check** supports the following attributes:
-
-* `port` - (Optional) TCP port to connect to (default 443).
-
-* `proxy_header` - (Optional) Type of proxy header to append before sending
- data to the backend, either NONE or PROXY_V1 (default NONE).
-
-* `request` - (Optional) Application data to send once the SSL connection has
- been established (default "").
-
-* `response` - (Optional) The response that indicates health (default "")
-
-
-**TCP Health Check** supports the following attributes:
-
-* `port` - (Optional) TCP port to connect to (default 80).
-
-* `proxy_header` - (Optional) Type of proxy header to append before sending
- data to the backend, either NONE or PROXY_V1 (default NONE).
-
-* `request` - (Optional) Application data to send once the TCP connection has
- been established (default "").
-
-* `response` - (Optional) The response that indicates health (default "")
-
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown b/website/source/docs/providers/google/r/compute_http_health_check.html.markdown
deleted file mode 100644
index c5ee13755..000000000
--- a/website/source/docs/providers/google/r/compute_http_health_check.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_http_health_check"
-sidebar_current: "docs-google-compute-http-health-check"
-description: |-
- Manages an HTTP Health Check within GCE.
----
-
-# google\_compute\_http\_health\_check
-
-Manages an HTTP health check within GCE. This is used to monitor instances
-behind load balancers. Timeouts or HTTP errors cause the instance to be
-removed from the pool. For more information, see [the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/httpHealthChecks).
-
-## Example Usage
-
-```hcl
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/health_check"
-
- timeout_sec = 1
- check_interval_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `check_interval_sec` - (Optional) The number of seconds between each poll of
- the instance (default 5).
-
-* `description` - (Optional) Textual description field.
-
-* `healthy_threshold` - (Optional) Consecutive successes required (default 2).
-
-* `host` - (Optional) HTTP host header field (default instance's public ip).
-
-* `port` - (Optional) TCP port to connect to (default 80).
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `request_path` - (Optional) URL path to query (default /).
-
-* `timeout_sec` - (Optional) The number of seconds to wait before declaring
- failure (default 5).
-
-* `unhealthy_threshold` - (Optional) Consecutive failures required (default 2).
-
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_https_health_check.html.markdown b/website/source/docs/providers/google/r/compute_https_health_check.html.markdown
deleted file mode 100644
index 79bb102ce..000000000
--- a/website/source/docs/providers/google/r/compute_https_health_check.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_https_health_check"
-sidebar_current: "docs-google-compute-https-health-check"
-description: |-
- Manages an HTTPS Health Check within GCE.
----
-
-# google\_compute\_https\_health\_check
-
-Manages an HTTPS health check within GCE. This is used to monitor instances
-behind load balancers. Timeouts or HTTPS errors cause the instance to be
-removed from the pool. For more information, see [the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/health-checks)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/httpsHealthChecks).
-
-## Example Usage
-
-```hcl
-resource "google_compute_https_health_check" "default" {
- name = "test"
- request_path = "/health_check"
-
- timeout_sec = 1
- check_interval_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-- - -
-
-* `check_interval_sec` - (Optional) How often to poll each instance (default 5).
-
-* `description` - (Optional) Textual description field.
-
-* `healthy_threshold` - (Optional) Consecutive successes required (default 2).
-
-* `host` - (Optional) HTTPS host header field (default instance's public ip).
-
-* `port` - (Optional) TCP port to connect to (default 443).
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `request_path` - (Optional) URL path to query (default /).
-
-* `timeout_sec` - (Optional) How long before declaring failure (default 5).
-
-* `unhealthy_threshold` - (Optional) Consecutive failures required (default 2).
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `self_link` - The URL of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_image.html.markdown b/website/source/docs/providers/google/r/compute_image.html.markdown
deleted file mode 100644
index fe5bea7af..000000000
--- a/website/source/docs/providers/google/r/compute_image.html.markdown
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_image"
-sidebar_current: "docs-google-compute-image"
-description: |-
- Creates a bootable VM image for Google Compute Engine from an existing tarball.
----
-
-# google\_compute\_image
-
-Creates a bootable VM image resource for Google Compute Engine from an existing
-tarball. For more information see [the official documentation](https://cloud.google.com/compute/docs/images) and
-[API](https://cloud.google.com/compute/docs/reference/latest/images).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_image" "bootable-image" {
- name = "my-custom-image"
-
- raw_disk {
- source = "https://storage.googleapis.com/my-bucket/my-disk-image-tarball.tar.gz"
- }
-}
-
-resource "google_compute_instance" "vm" {
- name = "vm-from-custom-image"
- machine_type = "n1-standard-1"
- zone = "us-east1-c"
-
- disk {
- image = "${google_compute_image.bootable-image.self_link}"
- }
-
- network_interface {
- network = "default"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported: (Note that one of either source_disk or
- raw_disk is required)
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `source_disk` - The URL of a disk that will be used as the source of the
- image. Changing this forces a new resource to be created.
-
-* `raw_disk` - The raw disk that will be used as the source of the image.
- Changing this forces a new resource to be created. Structure is documented
- below.
-
-* `create_timeout` - Configurable timeout in minutes for creating images. Default is 4 minutes.
- Changing this forces a new resource to be created.
-
-The `raw_disk` block supports:
-
-* `source` - (Required) The full Google Cloud Storage URL where the disk
- image is stored.
-
-* `sha1` - (Optional) SHA1 checksum of the source tarball that will be used
- to verify the source before creating the image.
-
-* `container_type` - (Optional) The format used to encode and transmit the
- block device. TAR is the only supported type and is the default.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `description` - (Optional) The description of the image to be created
-
-* `family` - (Optional) The name of the image family to which this image belongs.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_instance.html.markdown b/website/source/docs/providers/google/r/compute_instance.html.markdown
deleted file mode 100644
index e0e7647f7..000000000
--- a/website/source/docs/providers/google/r/compute_instance.html.markdown
+++ /dev/null
@@ -1,217 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_instance"
-sidebar_current: "docs-google-compute-instance"
-description: |-
- Manages a VM instance resource within GCE.
----
-
-# google\_compute\_instance
-
-Manages a VM instance resource within GCE. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instances)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/instances).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_instance" "default" {
- name = "test"
- machine_type = "n1-standard-1"
- zone = "us-central1-a"
-
- tags = ["foo", "bar"]
-
- disk {
- image = "debian-cloud/debian-8"
- }
-
- // Local SSD disk
- disk {
- type = "local-ssd"
- scratch = true
- }
-
- network_interface {
- network = "default"
-
- access_config {
- // Ephemeral IP
- }
- }
-
- metadata {
- foo = "bar"
- }
-
- metadata_startup_script = "echo hi > /test.txt"
-
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `disk` - (Required) Disks to attach to the instance. This can be specified
- multiple times for multiple disks. Structure is documented below.
-
-* `machine_type` - (Required) The machine type to create. To create a custom
- machine type, value should be set as specified
- [here](https://cloud.google.com/compute/docs/reference/latest/instances#machineType)
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `zone` - (Required) The zone that the machine should be created in.
-
-* `network_interface` - (Required) Networks to attach to the instance. This can
- be specified multiple times for multiple networks, but GCE is currently
- limited to just 1. Structure is documented below.
-
-- - -
-
-* `can_ip_forward` - (Optional) Whether to allow sending and receiving of
- packets with non-matching source or destination IPs.
- This defaults to false.
-
-* `description` - (Optional) A brief description of this resource.
-
-* `metadata` - (Optional) Metadata key/value pairs to make available from
- within the instance.
-
-* `metadata_startup_script` - (Optional) An alternative to using the
- startup-script metadata key, except this one forces the instance to be
- recreated (thus re-running the script) if it is changed. This replaces the
- startup-script metadata key on the created instance and thus the two
- mechanisms are not allowed to be used simultaneously.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `scheduling` - (Optional) The scheduling strategy to use. More details about
- this configuration option are detailed below.
-
-* `service_account` - (Optional) Service account to attach to the instance.
- Structure is documented below.
-
-* `tags` - (Optional) A list of tags to attach to the instance.
-
-* `create_timeout` - (Optional) Configurable timeout in minutes for creating instances. Default is 4 minutes.
- Changing this forces a new resource to be created.
-
----
-
-* `network` - (DEPRECATED, Required) Networks to attach to the instance. This
- can be specified multiple times for multiple networks. Structure is
- documented below.
-
-The `disk` block supports: (Note that either disk or image is required, unless
-the type is "local-ssd", in which case scratch must be true).
-
-* `disk` - The name of the existing disk (such as those managed by
- `google_compute_disk`) to attach.
-
-* `image` - The image from which to initialize this disk. This can be
- one of: the image's `self_link`, `projects/{project}/global/images/{image}`,
- `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
- `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
- `{project}/{image}`, `{family}`, or `{image}`.
-
-* `auto_delete` - (Optional) Whether or not the disk should be auto-deleted.
- This defaults to true. Leave true for local SSDs.
-
-* `type` - (Optional) The GCE disk type, e.g. pd-standard, pd-ssd, or local-ssd.
-
-* `scratch` - (Optional) Whether the disk is a scratch disk as opposed to a
- persistent disk (required for local-ssd).
-
-* `size` - (Optional) The size of the image in gigabytes. If not specified, it
- will inherit the size of its base image. Do not specify for local SSDs as
- their size is fixed.
-
-* `device_name` - (Optional) Name with which attached disk will be accessible
- under `/dev/disk/by-id/`
-
-* `disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
- encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- to encrypt this disk.
-
-The `network_interface` block supports:
-
-* `network` - (Optional) The name or self_link of the network to attach this interface to.
- Either `network` or `subnetwork` must be provided.
-
-* `subnetwork` - (Optional) The name of the subnetwork to attach this interface
- to. The subnetwork must exist in the same region this instance will be
- created in. Either `network` or `subnetwork` must be provided.
-
-* `subnetwork_project` - (Optional) The project in which the subnetwork belongs.
- If it is not provided, the provider project is used.
-
-* `address` - (Optional) The private IP address to assign to the instance. If
- empty, the address will be automatically assigned.
-
-* `access_config` - (Optional) Access configurations, i.e. IPs via which this
- instance can be accessed via the Internet. Omit to ensure that the instance
- is not accessible from the Internet (this means that ssh provisioners will
- not work unless Terraform can send traffic to the instance's network, e.g.
- via tunnel or because it is running on another cloud instance on that
- network). This block can be repeated multiple times. Structure documented
- below.
-
-The `access_config` block supports:
-
-* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's
- network ip. If not given, one will be generated.
-
-The `service_account` block supports:
-
-* `email` - (Optional) The service account e-mail address. If not given, the
- default Google Compute Engine service account is used.
-
-* `scopes` - (Required) A list of service scopes. Both OAuth2 URLs and gcloud
- short names are supported.
-
-(DEPRECATED) The `network` block supports:
-
-* `source` - (Required) The name of the network to attach this interface to.
-
-* `address` - (Optional) The IP address of a reserved IP address to assign
- to this interface.
-
-The `scheduling` block supports:
-
-* `preemptible` - (Optional) Is the instance preemptible.
-
-* `on_host_maintenance` - (Optional) Describes maintenance behavior for the
- instance. Can be MIGRATE or TERMINATE, for more info, read
- [here](https://cloud.google.com/compute/docs/instances/setting-instance-scheduling-options)
-
-* `automatic_restart` - (Optional) Specifies if the instance should be
- restarted if it was terminated by Compute Engine (not a user).
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `metadata_fingerprint` - The unique fingerprint of the metadata.
-
-* `self_link` - The URI of the created resource.
-
-* `tags_fingerprint` - The unique fingerprint of the tags.
-
-* `network_interface.0.address` - The internal ip address of the instance, either manually or dynamically assigned.
-
-* `network_interface.0.access_config.0.assigned_nat_ip` - If the instance has an access config, either the given external ip (in the `nat_ip` field) or the ephemeral (generated) ip (if you didn't provide one).
-
-* `disk.0.disk_encryption_key_sha256` - The [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- encoded SHA-256 hash of the [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption) that protects this resource.
diff --git a/website/source/docs/providers/google/r/compute_instance_group.html.markdown b/website/source/docs/providers/google/r/compute_instance_group.html.markdown
deleted file mode 100644
index 766365ddb..000000000
--- a/website/source/docs/providers/google/r/compute_instance_group.html.markdown
+++ /dev/null
@@ -1,99 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_instance_group"
-sidebar_current: "docs-google-compute-instance-group"
-description: |-
- Manages an Instance Group within GCE.
----
-
-# google\_compute\_instance\_group
-
-The Google Compute Engine Instance Group API creates and manages pools
-of homogeneous Compute Engine virtual machine instances from a common instance
-template. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/#unmanaged_instance_groups)
-and [API](https://cloud.google.com/compute/docs/reference/latest/instanceGroups)
-
-## Example Usage
-
-### Empty instance group
-
-```hcl
-resource "google_compute_instance_group" "test" {
- name = "terraform-test"
- description = "Terraform test instance group"
- zone = "us-central1-a"
- network = "${google_compute_network.default.self_link}"
-}
-```
-
-### With instances and named ports
-
-```hcl
-resource "google_compute_instance_group" "webservers" {
- name = "terraform-webservers"
- description = "Terraform test instance group"
-
- instances = [
- "${google_compute_instance.test.self_link}",
- "${google_compute_instance.test2.self_link}",
- ]
-
- named_port {
- name = "http"
- port = "8080"
- }
-
- named_port {
- name = "https"
- port = "8443"
- }
-
- zone = "us-central1-a"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance group. Must be 1-63
- characters long and comply with
- [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
- include lowercase letters, numbers, and hyphens.
-
-* `zone` - (Required) The zone that this instance group should be created in.
-
-- - -
-
-* `description` - (Optional) An optional textual description of the instance
- group.
-
-* `instances` - (Optional) List of instances in the group. They should be given
- as self_link URLs. When adding instances they must all be in the same
- network and zone as the instance group.
-
-* `named_port` - (Optional) The named port configuration. See the section below
- for details on configuration.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `network` - (Optional) The URL of the network the instance group is in. If
- this is different from the network where the instances are in, the creation
- fails. Defaults to the network where the instances are in (if neither
- `network` nor `instances` is specified, this field will be blank).
-
-The `named_port` block supports:
-
-* `name` - (Required) The name which the port will be mapped to.
-
-* `port` - (Required) The port number to map the name to.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
-
-* `size` - The number of instances in the group.
diff --git a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown b/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
deleted file mode 100644
index b7d8c6cd9..000000000
--- a/website/source/docs/providers/google/r/compute_instance_group_manager.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_instance_group_manager"
-sidebar_current: "docs-google-compute-instance-group-manager"
-description: |-
- Manages an Instance Group within GCE.
----
-
-# google\_compute\_instance\_group\_manager
-
-The Google Compute Engine Instance Group Manager API creates and manages pools
-of homogeneous Compute Engine virtual machine instances from a common instance
-template. For more information, see [the official documentation](https://cloud.google.com/compute/docs/instance-groups/manager)
-and [API](https://cloud.google.com/compute/docs/instance-groups/manager/v1beta2/instanceGroupManagers)
-
-## Example Usage
-
-```hcl
-resource "google_compute_instance_group_manager" "foobar" {
- name = "terraform-test"
- description = "Terraform test instance group manager"
-
- base_instance_name = "foobar"
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- update_strategy = "NONE"
- zone = "us-central1-a"
-
- target_pools = ["${google_compute_target_pool.foobar.self_link}"]
- target_size = 2
-
- named_port {
- name = "customHTTP"
- port = 8888
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `base_instance_name` - (Required) The base instance name to use for
- instances in this group. The value must be a valid
- [RFC1035](https://www.ietf.org/rfc/rfc1035.txt) name. Supported characters
- are lowercase letters, numbers, and hyphens (-). Instances are named by
- appending a hyphen and a random four-character string to the base instance
- name.
-
-* `instance_template` - (Required) The full URL to an instance template from
- which all new instances will be created.
-
-* `name` - (Required) The name of the instance group manager. Must be 1-63
- characters long and comply with
- [RFC1035](https://www.ietf.org/rfc/rfc1035.txt). Supported characters
- include lowercase letters, numbers, and hyphens.
-
-* `zone` - (Required) The zone that instances in this group should be created
- in.
-
-- - -
-
-* `description` - (Optional) An optional textual description of the instance
- group manager.
-
-* `named_port` - (Optional) The named port configuration. See the section below
- for details on configuration.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `update_strategy` - (Optional, Default `"RESTART"`) If the `instance_template`
- resource is modified, a value of `"NONE"` will prevent any of the managed
- instances from being restarted by Terraform. A value of `"RESTART"` will
- restart all of the instances at once. In the future, as the GCE API matures
- we will support `"ROLLING_UPDATE"` as well.
-
-* `target_size` - (Optional) If not given at creation time, this defaults to 1.
- Do not specify this if you are managing the group with an autoscaler, as
- this will cause fighting.
-
-* `target_pools` - (Optional) The full URL of all target pools to which new
- instances in the group are added. Updating the target pools attribute does
- not affect existing instances.
-
-The `named_port` block supports: (Include a `named_port` block for each named-port required).
-
-* `name` - (Required) The name of the port.
-
-* `port` - (Required) The port number.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `fingerprint` - The fingerprint of the instance group manager.
-
-* `instance_group` - The full URL of the instance group created by the manager.
-
-* `self_link` - The URL of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_instance_template.html.markdown b/website/source/docs/providers/google/r/compute_instance_template.html.markdown
deleted file mode 100644
index 78bd47bca..000000000
--- a/website/source/docs/providers/google/r/compute_instance_template.html.markdown
+++ /dev/null
@@ -1,263 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_instance_template"
-sidebar_current: "docs-google-compute-instance-template"
-description: |-
- Manages a VM instance template resource within GCE.
----
-
-
-# google\_compute\_instance\_template
-
-Manages a VM instance template resource within GCE. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/instance-templates)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/instanceTemplates).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
- description = "template description"
-
- tags = ["foo", "bar"]
-
- instance_description = "description assigned to instances"
- machine_type = "n1-standard-1"
- can_ip_forward = false
-
- scheduling {
- automatic_restart = true
- on_host_maintenance = "MIGRATE"
- }
-
- // Create a new boot disk from an image
- disk {
- source_image = "debian-cloud/debian-8"
- auto_delete = true
- boot = true
- }
-
- // Use an existing disk resource
- disk {
- source = "foo_existing_disk"
- auto_delete = false
- boot = false
- }
-
- network_interface {
- network = "default"
- }
-
- metadata {
- foo = "bar"
- }
-
- service_account {
- scopes = ["userinfo-email", "compute-ro", "storage-ro"]
- }
-}
-```
-
-## Using with Instance Group Manager
-
-Instance Templates cannot be updated after creation with the Google
-Cloud Platform API. In order to update an Instance Template, Terraform will
-destroy the existing resource and create a replacement. In order to effectively
-use an Instance Template resource with an [Instance Group Manager resource][1],
-it's recommended to specify `create_before_destroy` in a [lifecycle][2] block.
-Either omit the Instance Template `name` attribute, or specify a partial name
-with `name_prefix`. Example:
-
-```hcl
-resource "google_compute_instance_template" "instance_template" {
- name_prefix = "instance-template-"
- machine_type = "n1-standard-1"
- region = "us-central1"
-
- // boot disk
- disk {
- # ...
- }
-
- // networking
- network_interface {
- # ...
- }
-
- lifecycle {
- create_before_destroy = true
- }
-}
-
-resource "google_compute_instance_group_manager" "instance_group_manager" {
- name = "instance-group-manager"
- instance_template = "${google_compute_instance_template.instance_template.self_link}"
- base_instance_name = "instance-group-manager"
- zone = "us-central1-f"
- target_size = "1"
-}
-```
-
-With this setup Terraform generates a unique name for your Instance
-Template and can then update the Instance Group manager without conflict before
-destroying the previous Instance Template.
-
-
-## Argument Reference
-
-Note that changing any field for this resource forces a new resource to be created.
-
-The following arguments are supported:
-
-* `disk` - (Required) Disks to attach to instances created from this template.
- This can be specified multiple times for multiple disks. Structure is
- documented below.
-
-* `machine_type` - (Required) The machine type to create.
-
-- - -
-* `name` - (Optional) The name of the instance template. If you leave
- this blank, Terraform will auto-generate a unique name.
-
-* `name_prefix` - (Optional) Creates a unique name beginning with the specified
- prefix. Conflicts with `name`.
-
-* `can_ip_forward` - (Optional) Whether to allow sending and receiving of
- packets with non-matching source or destination IPs. This defaults to false.
-
-* `description` - (Optional) A brief description of this resource.
-
-* `instance_description` - (Optional) A brief description to use for instances
- created from this template.
-
-* `metadata` - (Optional) Metadata key/value pairs to make available from
- within instances created from this template.
-
-* `metadata_startup_script` - (Optional) An alternative to using the
- startup-script metadata key, mostly to match the compute_instance resource.
- This replaces the startup-script metadata key on the created instance and
- thus the two mechanisms are not allowed to be used simultaneously.
-
-* `network_interface` - (Required) Networks to attach to instances created from
- this template. This can be specified multiple times for multiple networks.
- Structure is documented below.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) An instance template is a global resource that is not
- bound to a zone or a region. However, you can still specify some regional
- resources in an instance template, which restricts the template to the
- region where that resource resides. For example, a custom `subnetwork`
- resource is tied to a specific region. Defaults to the region of the
- Provider if no value is given.
-
-* `scheduling` - (Optional) The scheduling strategy to use. More details about
- this configuration option are detailed below.
-
-* `service_account` - (Optional) Service account to attach to the instance. Structure is documented below.
-
-* `tags` - (Optional) Tags to attach to the instance.
-
-The `disk` block supports:
-
-* `auto_delete` - (Optional) Whether or not the disk should be auto-deleted.
- This defaults to true.
-
-* `boot` - (Optional) Indicates that this is a boot disk.
-
-* `device_name` - (Optional) A unique device name that is reflected into the
- /dev/ tree of a Linux operating system running within the instance. If not
- specified, the server chooses a default device name to apply to this disk.
-
-* `disk_name` - (Optional) Name of the disk. When not provided, this defaults
- to the name of the instance.
-
-* `source_image` - (Required if source not set) The image from which to
- initialize this disk. This can be one of: the image's `self_link`,
- `projects/{project}/global/images/{image}`,
- `projects/{project}/global/images/family/{family}`, `global/images/{image}`,
- `global/images/family/{family}`, `family/{family}`, `{project}/{family}`,
- `{project}/{image}`, `{family}`, or `{image}`.
-
-* `interface` - (Optional) Specifies the disk interface to use for attaching
- this disk.
-
-* `mode` - (Optional) The mode in which to attach this disk, either READ_WRITE
-  or READ_ONLY. If you are attaching or creating a boot disk, this must be in
-  READ_WRITE mode.
-
-* `source` - (Required if source_image not set) The name of the disk (such as
- those managed by `google_compute_disk`) to attach.
-
-* `disk_type` - (Optional) The GCE disk type. Can be either `"pd-ssd"`,
- `"local-ssd"`, or `"pd-standard"`.
-
-* `disk_size_gb` - (Optional) The size of the image in gigabytes. If not
- specified, it will inherit the size of its base image.
-
-* `type` - (Optional) The type of GCE disk, can be either `"SCRATCH"` or
- `"PERSISTENT"`.
-
-The `network_interface` block supports:
-
-* `network` - (Optional) The name or self_link of the network to attach this interface to.
- Use `network` attribute for Legacy or Auto subnetted networks and
- `subnetwork` for custom subnetted networks.
-
-* `subnetwork` - (Optional) the name of the subnetwork to attach this interface
- to. The subnetwork must exist in the same `region` this instance will be
- created in. Either `network` or `subnetwork` must be provided.
-
-* `subnetwork_project` - (Optional) The project in which the subnetwork belongs.
- If it is not provided, the provider project is used.
-
-* `access_config` - (Optional) Access configurations, i.e. IPs via which this
-  instance can be accessed via the Internet. Omit to ensure that the instance
-  is not accessible from the Internet (this means that ssh provisioners will
-  not work unless Terraform can send traffic to the instance's network, e.g.
-  via a tunnel or because it is running on another cloud instance on that
-  network). This block can be repeated multiple times. Structure documented below.
-
-The `access_config` block supports:
-
-* `nat_ip` - (Optional) The IP address that will be 1:1 mapped to the instance's
- network ip. If not given, one will be generated.
-
-The `service_account` block supports:
-
-* `email` - (Optional) The service account e-mail address. If not given, the
- default Google Compute Engine service account is used.
-
-* `scopes` - (Required) A list of service scopes. Both OAuth2 URLs and gcloud
- short names are supported.
-
-The `scheduling` block supports:
-
-* `automatic_restart` - (Optional) Specifies whether the instance should be
- automatically restarted if it is terminated by Compute Engine (not
- terminated by a user). This defaults to true.
-
-* `on_host_maintenance` - (Optional) Defines the maintenance behavior for this
- instance.
-
-* `preemptible` - (Optional) Allows instance to be preempted. This defaults to
- false. Read more on this
- [here](https://cloud.google.com/compute/docs/instances/preemptible).
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `metadata_fingerprint` - The unique fingerprint of the metadata.
-
-* `self_link` - The URI of the created resource.
-
-* `tags_fingerprint` - The unique fingerprint of the tags.
-
-[1]: /docs/providers/google/r/compute_instance_group_manager.html
-[2]: /docs/configuration/resources.html#lifecycle
diff --git a/website/source/docs/providers/google/r/compute_network.html.markdown b/website/source/docs/providers/google/r/compute_network.html.markdown
deleted file mode 100644
index a93136880..000000000
--- a/website/source/docs/providers/google/r/compute_network.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_network"
-sidebar_current: "docs-google-compute-network"
-description: |-
- Manages a network within GCE.
----
-
-# google\_compute\_network
-
-Manages a network within GCE.
-
-## Example Usage
-
-```hcl
-resource "google_compute_network" "default" {
- name = "test"
- auto_create_subnetworks = "true"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `auto_create_subnetworks` - (Optional) If set to true, this network will be
- created in auto subnet mode, and Google will create a subnet for each region
- automatically. If set to false, and `ipv4_range` is not set, a custom
- subnetted network will be created that can support
- `google_compute_subnetwork` resources. This attribute may not be used if
- `ipv4_range` is specified.
-
-* `description` - (Optional) A brief description of this resource.
-
-* `ipv4_range` - (DEPRECATED, Optional) The IPv4 address range that machines in this network
- are assigned to, represented as a CIDR block. If not set, an auto or custom
- subnetted network will be created, depending on the value of
- `auto_create_subnetworks` attribute. This attribute may not be used if
- `auto_create_subnetworks` is specified. This attribute is deprecated.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `gateway_ipv4` - The IPv4 address of the gateway.
-
-* `name` - The unique name of the network.
-
-* `self_link` - The URI of the created resource.
-
-
-## Import
-
-Networks can be imported using the `name`, e.g.
-
-```
-$ terraform import google_compute_network.public my_network_name
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/google/r/compute_project_metadata.html.markdown b/website/source/docs/providers/google/r/compute_project_metadata.html.markdown
deleted file mode 100644
index 3b7dd19df..000000000
--- a/website/source/docs/providers/google/r/compute_project_metadata.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_project_metadata"
-sidebar_current: "docs-google-compute-project-metadata"
-description: |-
- Manages common instance metadata
----
-
-# google\_compute\_project\_metadata
-
-Manages metadata common to all instances for a project in GCE.
-
-## Example Usage
-
-```hcl
-resource "google_compute_project_metadata" "default" {
- metadata {
- foo = "bar"
- fizz = "buzz"
- "13" = "42"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) A series of key value pairs. Changing this resource
- updates the GCE state.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
diff --git a/website/source/docs/providers/google/r/compute_region_backend_service.html.markdown b/website/source/docs/providers/google/r/compute_region_backend_service.html.markdown
deleted file mode 100644
index a54ee2b7b..000000000
--- a/website/source/docs/providers/google/r/compute_region_backend_service.html.markdown
+++ /dev/null
@@ -1,132 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_region_backend_service"
-sidebar_current: "docs-google-compute-region-backend-service"
-description: |-
- Creates a Region Backend Service resource for Google Compute Engine.
----
-
-# google\_compute\_region\_backend\_service
-
-A Region Backend Service defines a regionally-scoped group of virtual machines that will serve traffic for load balancing.
-For more information see [the official documentation](https://cloud.google.com/compute/docs/load-balancing/internal/)
-and [API](https://cloud.google.com/compute/docs/reference/latest/backendServices).
-
-## Example Usage
-
-```hcl
-resource "google_compute_region_backend_service" "foobar" {
- name = "blablah"
- description = "Hello World 1234"
- protocol = "TCP"
- timeout_sec = 10
- session_affinity = "CLIENT_IP"
-
- backend {
- group = "${google_compute_instance_group_manager.foo.instance_group}"
- }
-
- health_checks = ["${google_compute_health_check.default.self_link}"]
-}
-
-resource "google_compute_instance_group_manager" "foo" {
- name = "terraform-test"
- instance_template = "${google_compute_instance_template.foobar.self_link}"
- base_instance_name = "foobar"
- zone = "us-central1-f"
- target_size = 1
-}
-
-resource "google_compute_instance_template" "foobar" {
- name = "terraform-test"
- machine_type = "n1-standard-1"
-
- network_interface {
- network = "default"
- }
-
- disk {
- source_image = "debian-cloud/debian-8"
- auto_delete = true
- boot = true
- }
-}
-
-resource "google_compute_health_check" "default" {
- name = "test"
- check_interval_sec = 1
- timeout_sec = 1
-
- tcp_health_check {
- port = "80"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the backend service.
-
-* `health_checks` - (Required) Specifies a list of health check objects
- for checking the health of the backend service.
-
-- - -
-
-* `backend` - (Optional) The list of backends that serve this BackendService.
- Structure is documented below.
-
-* `description` - (Optional) The textual description for the backend service.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `protocol` - (Optional) The protocol for incoming requests. Defaults to
- `HTTP`.
-
-* `session_affinity` - (Optional) How to distribute load. Options are `NONE` (no
- affinity), `CLIENT_IP`, `CLIENT_IP_PROTO`, or `CLIENT_IP_PORT_PROTO`.
- Defaults to `NONE`.
-
-* `region` - (Optional) The Region in which the created address should reside.
- If it is not provided, the provider region is used.
-
-* `timeout_sec` - (Optional) The number of secs to wait for a backend to respond
- to a request before considering the request failed. Defaults to `30`.
-
-
-The `backend` block supports:
-
-* `group` - (Required) The name or URI of a Compute Engine instance group
- (`google_compute_instance_group_manager.xyz.instance_group`) that can
- receive traffic. Instance groups must contain at least one instance.
-
-* `balancing_mode` - (Optional) Defines the strategy for balancing load.
- Defaults to `UTILIZATION`
-
-* `capacity_scaler` - (Optional) A float in the range [0, 1.0] that scales the
- maximum parameters for the group (e.g., max rate). A value of 0.0 will cause
- no requests to be sent to the group (i.e., it adds the group in a drained
- state). The default is 1.0.
-
-* `description` - (Optional) Textual description for the backend.
-
-* `max_rate` - (Optional) Maximum requests per second (RPS) that the group can
- handle.
-
-* `max_rate_per_instance` - (Optional) The maximum per-instance requests per
- second (RPS).
-
-* `max_utilization` - (Optional) The target CPU utilization for the group as a
- float in the range [0.0, 1.0]. This flag can only be provided when the
- balancing mode is `UTILIZATION`. Defaults to `0.8`.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `fingerprint` - The fingerprint of the backend service.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_route.html.markdown b/website/source/docs/providers/google/r/compute_route.html.markdown
deleted file mode 100644
index c44566209..000000000
--- a/website/source/docs/providers/google/r/compute_route.html.markdown
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_route"
-sidebar_current: "docs-google-compute-route"
-description: |-
- Manages a network route within GCE.
----
-
-# google\_compute\_route
-
-Manages a network route within GCE.
-
-## Example Usage
-
-```hcl
-resource "google_compute_network" "foobar" {
- name = "test"
- ipv4_range = "10.0.0.0/16"
-}
-
-resource "google_compute_route" "foobar" {
- name = "test"
- dest_range = "15.0.0.0/24"
- network = "${google_compute_network.foobar.name}"
- next_hop_ip = "10.0.1.5"
- priority = 100
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `dest_range` - (Required) The destination IPv4 address range that this
- route applies to.
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `network` - (Required) The name or self_link of the network to attach this route to.
-
-* `priority` - (Required) The priority of this route, used to break ties.
-
-- - -
-
-* `next_hop_gateway` - (Optional) The URL of the internet gateway to route
- to if this route is matched. The alias "default-internet-gateway" can also
- be used.
-
-* `next_hop_instance` - (Optional) The name of the VM instance to route to
- if this route is matched.
-
-* `next_hop_instance_zone` - (Required when `next_hop_instance` is specified)
- The zone of the instance specified in `next_hop_instance`.
-
-* `next_hop_ip` - (Optional) The IP address of the next hop if this route
- is matched.
-
-* `next_hop_vpn_tunnel` - (Optional) The name of the VPN to route to if this
- route is matched.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `tags` - (Optional) The tags that this route applies to.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `next_hop_network` - The name of the next hop network, if available.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_router.html.markdown b/website/source/docs/providers/google/r/compute_router.html.markdown
deleted file mode 100644
index 73e915656..000000000
--- a/website/source/docs/providers/google/r/compute_router.html.markdown
+++ /dev/null
@@ -1,151 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_router"
-sidebar_current: "docs-google-compute-router"
-description: |-
- Manages a Cloud Router resource.
----
-
-# google\_compute\_router
-
-Manages a Cloud Router resource. For more info, read the
-[documentation](https://cloud.google.com/compute/docs/cloudrouter).
-
-## Example Usage
-
-```hcl
-resource "google_compute_network" "foobar" {
- name = "network-1"
-}
-
-resource "google_compute_subnetwork" "foobar" {
- name = "subnet-1"
- network = "${google_compute_network.foobar.self_link}"
- ip_cidr_range = "10.0.0.0/16"
- region = "us-central1"
-}
-
-resource "google_compute_address" "foobar" {
- name = "vpn-gateway-1-address"
- region = "${google_compute_subnetwork.foobar.region}"
-}
-
-resource "google_compute_vpn_gateway" "foobar" {
- name = "vpn-gateway-1"
- network = "${google_compute_network.foobar.self_link}"
- region = "${google_compute_subnetwork.foobar.region}"
-}
-
-resource "google_compute_forwarding_rule" "foobar_esp" {
- name = "vpn-gw-1-esp"
- region = "${google_compute_vpn_gateway.foobar.region}"
- ip_protocol = "ESP"
- ip_address = "${google_compute_address.foobar.address}"
- target = "${google_compute_vpn_gateway.foobar.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "foobar_udp500" {
- name = "vpn-gw-1-udp-500"
- region = "${google_compute_forwarding_rule.foobar_esp.region}"
- ip_protocol = "UDP"
- port_range = "500-500"
- ip_address = "${google_compute_address.foobar.address}"
- target = "${google_compute_vpn_gateway.foobar.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "foobar_udp4500" {
- name = "vpn-gw-1-udp-4500"
- region = "${google_compute_forwarding_rule.foobar_udp500.region}"
- ip_protocol = "UDP"
- port_range = "4500-4500"
- ip_address = "${google_compute_address.foobar.address}"
- target = "${google_compute_vpn_gateway.foobar.self_link}"
-}
-
-resource "google_compute_router" "foobar" {
- name = "router-1"
- region = "${google_compute_forwarding_rule.foobar_udp500.region}"
- network = "${google_compute_network.foobar.self_link}"
-
- bgp {
- asn = 64512
- }
-}
-
-resource "google_compute_vpn_tunnel" "foobar" {
- name = "vpn-tunnel-1"
- region = "${google_compute_forwarding_rule.foobar_udp4500.region}"
- target_vpn_gateway = "${google_compute_vpn_gateway.foobar.self_link}"
- shared_secret = "unguessable"
- peer_ip = "8.8.8.8"
- router = "${google_compute_router.foobar.name}"
-}
-
-resource "google_compute_router_interface" "foobar" {
- name = "interface-1"
- router = "${google_compute_router.foobar.name}"
- region = "${google_compute_router.foobar.region}"
- ip_range = "169.254.1.1/30"
- vpn_tunnel = "${google_compute_vpn_tunnel.foobar.name}"
-}
-
-resource "google_compute_router_peer" "foobar" {
- name = "peer-1"
- router = "${google_compute_router.foobar.name}"
- region = "${google_compute_router.foobar.region}"
- peer_ip_address = "169.254.1.2"
- peer_asn = 65513
- advertised_route_priority = 100
- interface = "${google_compute_router_interface.foobar.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the router, required by GCE. Changing
- this forces a new router to be created.
-
-* `network` - (Required) The name or resource link to the network this Cloud Router
- will use to learn and announce routes. Changing this forces a new router to be created.
-
-* `bgp` - (Required) BGP information specific to this router.
- Changing this forces a new router to be created.
- Structure is documented below.
-
-- - -
-
-* `description` - (Optional) A description of the resource.
- Changing this forces a new router to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
- Changing this forces a new router to be created.
-
-* `region` - (Optional) The region this router should sit in. If not specified,
- the project region will be used. Changing this forces a new router to be
- created.
-
-- - -
-
-The `bgp` block supports:
-
-* `asn` - (Required) Local BGP Autonomous System Number (ASN). Must be an
- RFC6996 private ASN.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
-
-## Import
-
-Routers can be imported using the `region` and `name`, e.g.
-
-```
-$ terraform import google_compute_router.router-1 us-central1/router-1
-```
-
diff --git a/website/source/docs/providers/google/r/compute_router_interface.html.markdown b/website/source/docs/providers/google/r/compute_router_interface.html.markdown
deleted file mode 100644
index 5c3a17b55..000000000
--- a/website/source/docs/providers/google/r/compute_router_interface.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_router_interface"
-sidebar_current: "docs-google-compute-router-interface"
-description: |-
- Manages a Cloud Router interface.
----
-
-# google\_compute\_router\_interface
-
-Manages a Cloud Router interface. For more info, read the
-[documentation](https://cloud.google.com/compute/docs/cloudrouter).
-
-## Example Usage
-
-```hcl
-resource "google_compute_router_interface" "foobar" {
- name = "interface-1"
- router = "router-1"
- region = "us-central1"
- ip_range = "169.254.1.1/30"
- vpn_tunnel = "tunnel-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the interface, required by GCE. Changing
- this forces a new interface to be created.
-
-* `router` - (Required) The name of the router this interface will be attached to.
- Changing this forces a new interface to be created.
-
-* `vpn_tunnel` - (Required) The name or resource link to the VPN tunnel this
- interface will be linked to. Changing this forces a new interface to be created.
-
-- - -
-
-* `ip_range` - (Optional) IP address and range of the interface. The IP range must be
- in the RFC3927 link-local IP space. Changing this forces a new interface to be created.
-
-* `project` - (Optional) The project in which this interface's router belongs. If it
- is not provided, the provider project is used. Changing this forces a new interface to be created.
-
-* `region` - (Optional) The region this interface's router sits in. If not specified,
- the project region will be used. Changing this forces a new interface to be
- created.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
-
-## Import
-
-Router interfaces can be imported using the `region`, `router` and `name`, e.g.
-
-```
-$ terraform import google_compute_router_interface.interface-1 us-central1/router-1/interface-1
-```
-
diff --git a/website/source/docs/providers/google/r/compute_router_peer.html.markdown b/website/source/docs/providers/google/r/compute_router_peer.html.markdown
deleted file mode 100644
index d5305be4e..000000000
--- a/website/source/docs/providers/google/r/compute_router_peer.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_router_peer"
-sidebar_current: "docs-google-compute-router-peer"
-description: |-
- Manages a Cloud Router BGP peer.
----
-
-# google\_compute\_router\_peer
-
-Manages a Cloud Router BGP peer. For more info, read the
-[documentation](https://cloud.google.com/compute/docs/cloudrouter).
-
-## Example Usage
-
-```hcl
-resource "google_compute_router_peer" "foobar" {
- name = "peer-1"
- router = "router-1"
- region = "us-central1"
- peer_ip_address = "169.254.1.2"
- peer_asn = 65513
- advertised_route_priority = 100
- interface = "interface-1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for BGP peer, required by GCE. Changing
- this forces a new peer to be created.
-
-* `router` - (Required) The name of the router in which this BGP peer will be configured.
- Changing this forces a new peer to be created.
-
-* `interface` - (Required) The name of the interface the BGP peer is associated with.
- Changing this forces a new peer to be created.
-
-* `peer_ip_address` - (Required) IP address of the BGP interface outside Google Cloud.
- Changing this forces a new peer to be created.
-
-* `peer_asn` - (Required) Peer BGP Autonomous System Number (ASN).
- Changing this forces a new peer to be created.
-
-- - -
-
-* `advertised_route_priority` - (Optional) The priority of routes advertised to this BGP peer.
- Changing this forces a new peer to be created.
-
-* `project` - (Optional) The project in which this peer's router belongs. If it
- is not provided, the provider project is used. Changing this forces a new peer to be created.
-
-* `region` - (Optional) The region this peer's router sits in. If not specified,
- the project region will be used. Changing this forces a new peer to be
- created.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `ip_address` - IP address of the interface inside Google Cloud Platform.
-
-## Import
-
-Router BGP peers can be imported using the `region`, `router` and `name`, e.g.
-
-```
-$ terraform import google_compute_router_peer.peer-1 us-central1/router-1/peer-1
-```
diff --git a/website/source/docs/providers/google/r/compute_snapshot.html.markdown b/website/source/docs/providers/google/r/compute_snapshot.html.markdown
deleted file mode 100644
index cdeb4fea9..000000000
--- a/website/source/docs/providers/google/r/compute_snapshot.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_snapshot"
-sidebar_current: "docs-google-compute-snapshot"
-description: |-
- Creates a new snapshot of a disk within GCE.
----
-
-# google\_compute\_snapshot
-
-Creates a new snapshot of a disk within GCE.
-
-## Example Usage
-
-```js
-resource "google_compute_snapshot" "default" {
- name = "test-snapshot"
- source_disk = "test-disk"
- zone = "us-central1-a"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `zone` - (Required) The zone where the source disk is located.
-
-* `source_disk` - (Required) The disk which will be used as the source of the snapshot.
-
-- - -
-
-* `source_disk_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
- encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- to decrypt the source disk.
-
-* `snapshot_encryption_key_raw` - (Optional) A 256-bit [customer-supplied encryption key]
- (https://cloud.google.com/compute/docs/disks/customer-supplied-encryption),
- encoded in [RFC 4648 base64](https://tools.ietf.org/html/rfc4648#section-4)
- to encrypt this snapshot.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `snapshot_encryption_key_sha256` - The [RFC 4648 base64]
- (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the
- [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption)
- that protects this resource.
-
-* `source_disk_encryption_key_sha256` - The [RFC 4648 base64]
- (https://tools.ietf.org/html/rfc4648#section-4) encoded SHA-256 hash of the
- [customer-supplied encryption key](https://cloud.google.com/compute/docs/disks/customer-supplied-encryption)
- that protects the source disk.
-
-* `source_disk_link` - The URI of the source disk.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown b/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown
deleted file mode 100644
index 48d94742a..000000000
--- a/website/source/docs/providers/google/r/compute_ssl_certificate.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_ssl_certificate"
-sidebar_current: "docs-google-compute-ssl-certificate"
-description: |-
- Creates an SSL certificate resource necessary for HTTPS load balancing in GCE.
----
-
-# google\_compute\_ssl\_certificate
-
-Creates an SSL certificate resource necessary for HTTPS load balancing in GCE.
-For more information see
-[the official documentation](https://cloud.google.com/compute/docs/load-balancing/http/ssl-certificates) and
-[API](https://cloud.google.com/compute/docs/reference/latest/sslCertificates).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_ssl_certificate" "default" {
- name_prefix = "my-certificate-"
- description = "a description"
- private_key = "${file("path/to/private.key")}"
- certificate = "${file("path/to/certificate.crt")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `certificate` - (Required) A local certificate file in PEM format. The chain
- may be at most 5 certs long, and must include at least one intermediate
- cert. Changing this forces a new resource to be created.
-
-* `private_key` - (Required) Write only private key in PEM format.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `name` - (Optional) A unique name for the SSL certificate. If you leave
- this blank, Terraform will auto-generate a unique name.
-
-* `name_prefix` - (Optional) Creates a unique name beginning with the specified
- prefix. Conflicts with `name`.
-
-* `description` - (Optional) An optional description of this resource.
- Changing this forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `id` - A unique ID for the certificated, assigned by GCE.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_subnetwork.html.markdown b/website/source/docs/providers/google/r/compute_subnetwork.html.markdown
deleted file mode 100644
index 80f9b14b8..000000000
--- a/website/source/docs/providers/google/r/compute_subnetwork.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_subnetwork"
-sidebar_current: "docs-google-compute-subnetwork"
-description: |-
- Manages a subnetwork within GCE.
----
-
-# google\_compute\_subnetwork
-
-Manages a subnetwork within GCE. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/vpc/#vpc_networks_and_subnets)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/subnetworks).
-
-## Example Usage
-
-```hcl
-resource "google_compute_subnetwork" "default-us-east1" {
- name = "default-us-east1"
- ip_cidr_range = "10.0.0.0/16"
- network = "${google_compute_network.default.self_link}"
- region = "us-east1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `ip_cidr_range` - (Required) The IP address range that machines in this
- network are assigned to, represented as a CIDR block.
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-* `network` - (Required) The network name or resource link to the parent
- network of this subnetwork. The parent network must have been created
- in custom subnet mode.
-
-- - -
-
-* `description` - (Optional) Description of this subnetwork.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The region this subnetwork will be created in. If
- unspecified, this defaults to the region configured in the provider.
-
-* `private_ip_google_access` - (Optional) Whether the VMs in this subnet
- can access Google services without assigned external IP
- addresses.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `gateway_address` - The IP address of the gateway.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown b/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown
deleted file mode 100644
index dc23841bf..000000000
--- a/website/source/docs/providers/google/r/compute_target_http_proxy.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_target_http_proxy"
-sidebar_current: "docs-google-compute-target-http-proxy"
-description: |-
- Creates a Target HTTP Proxy resource in GCE.
----
-
-# google\_compute\_target\_http\_proxy
-
-Creates a target HTTP proxy resource in GCE. For more information see
-[the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) and
-[API](https://cloud.google.com/compute/docs/reference/latest/targetHttpProxies).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_target_http_proxy" "default" {
- name = "test-proxy"
- description = "a description"
- url_map = "${google_compute_url_map.default.self_link}"
-}
-
-resource "google_compute_url_map" "default" {
- name = "url-map"
- description = "a description"
-
- default_service = "${google_compute_backend_service.default.self_link}"
-
- host_rule {
- hosts = ["mysite.com"]
- path_matcher = "allpaths"
- }
-
- path_matcher {
- name = "allpaths"
- default_service = "${google_compute_backend_service.default.self_link}"
-
- path_rule {
- paths = ["/*"]
- service = "${google_compute_backend_service.default.self_link}"
- }
- }
-}
-
-resource "google_compute_backend_service" "default" {
- name = "default-backend"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-* `url_map` - (Required) The URL of a URL Map resource that defines the mapping
- from the URL to the BackendService.
-
-- - -
-
-* `description` - (Optional) A description of this resource. Changing this
- forces a new resource to be created.
-
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `id` - A unique ID assigned by GCE.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown b/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown
deleted file mode 100644
index c8c2e398e..000000000
--- a/website/source/docs/providers/google/r/compute_target_https_proxy.html.markdown
+++ /dev/null
@@ -1,102 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_target_https_proxy"
-sidebar_current: "docs-google-compute-target-https-proxy"
-description: |-
- Creates a Target HTTPS Proxy resource in GCE.
----
-
-# google\_compute\_target\_https\_proxy
-
-Creates a target HTTPS proxy resource in GCE. For more information see
-[the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/http/target-proxies) and
-[API](https://cloud.google.com/compute/docs/reference/latest/targetHttpsProxies).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_target_https_proxy" "default" {
- name = "test-proxy"
- description = "a description"
- url_map = "${google_compute_url_map.default.self_link}"
- ssl_certificates = ["${google_compute_ssl_certificate.default.self_link}"]
-}
-
-resource "google_compute_ssl_certificate" "default" {
- name = "my-certificate"
- description = "a description"
- private_key = "${file("path/to/private.key")}"
- certificate = "${file("path/to/certificate.crt")}"
-}
-
-resource "google_compute_url_map" "default" {
- name = "url-map"
- description = "a description"
-
- default_service = "${google_compute_backend_service.default.self_link}"
-
- host_rule {
- hosts = ["mysite.com"]
- path_matcher = "allpaths"
- }
-
- path_matcher {
- name = "allpaths"
- default_service = "${google_compute_backend_service.default.self_link}"
-
- path_rule {
- paths = ["/*"]
- service = "${google_compute_backend_service.default.self_link}"
- }
- }
-}
-
-resource "google_compute_backend_service" "default" {
- name = "default-backend"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-* `ssl_certificates` - (Required) The URLs of the SSL Certificate resources that
- authenticate connections between users and load balancing. Currently exactly
- one must be specified.
-
-* `url_map` - (Required) The URL of a URL Map resource that defines the mapping
- from the URL to the BackendService.
-
-- - -
-
-* `description` - (Optional) A description of this resource. Changing this
- forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `id` - A unique ID assigned by GCE.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_target_pool.html.markdown b/website/source/docs/providers/google/r/compute_target_pool.html.markdown
deleted file mode 100644
index 72a3ce153..000000000
--- a/website/source/docs/providers/google/r/compute_target_pool.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_target_pool"
-sidebar_current: "docs-google-compute-target-pool"
-description: |-
- Manages a Target Pool within GCE.
----
-
-# google\_compute\_target\_pool
-
-Manages a Target Pool within GCE. This is a collection of instances used as
-target of a network load balancer (Forwarding Rule). For more information see
-[the official
-documentation](https://cloud.google.com/compute/docs/load-balancing/network/target-pools)
-and [API](https://cloud.google.com/compute/docs/reference/latest/targetPools).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_target_pool" "default" {
- name = "test"
-
- instances = [
- "us-central1-a/myinstance1",
- "us-central1-b/myinstance2",
- ]
-
- health_checks = [
- "${google_compute_http_health_check.default.name}",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-- - -
-
-* `backup_pool` - (Optional) URL to the backup target pool. Must also set
- failover\_ratio.
-
-* `description` - (Optional) Textual description field.
-
-* `failover_ratio` - (Optional) Ratio (0 to 1) of failed nodes before using the
- backup pool (which must also be set).
-
-* `health_checks` - (Optional) List of zero or one healthcheck names.
-
-* `instances` - (Optional) List of instances in the pool. They can be given as
- URLs, or in the form of "zone/name". Note that the instances need not exist
- at the time of target pool creation, so there is no need to use the
- Terraform interpolators to create a dependency on the instances from the
- target pool.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) Where the target pool resides. Defaults to project
- region.
-
-* `session_affinity` - (Optional) How to distribute load. Options are "NONE" (no
- affinity). "CLIENT\_IP" (hash of the source/dest addresses / ports), and
- "CLIENT\_IP\_PROTO" also includes the protocol (default "NONE").
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_url_map.html.markdown b/website/source/docs/providers/google/r/compute_url_map.html.markdown
deleted file mode 100644
index f28e83816..000000000
--- a/website/source/docs/providers/google/r/compute_url_map.html.markdown
+++ /dev/null
@@ -1,173 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_url_map"
-sidebar_current: "docs-google-compute-url-map"
-description: |-
- Manages a URL Map resource in GCE.
----
-
-# google\_compute\_url\_map
-
-Manages a URL Map resource within GCE. For more information see
-[the official documentation](https://cloud.google.com/compute/docs/load-balancing/http/url-map)
-and
-[API](https://cloud.google.com/compute/docs/reference/latest/urlMaps).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_url_map" "foobar" {
- name = "urlmap"
- description = "a description"
-
- default_service = "${google_compute_backend_service.home.self_link}"
-
- host_rule {
- hosts = ["mysite.com"]
- path_matcher = "allpaths"
- }
-
- path_matcher {
- name = "allpaths"
- default_service = "${google_compute_backend_service.home.self_link}"
-
- path_rule {
- paths = ["/home"]
- service = "${google_compute_backend_service.home.self_link}"
- }
-
- path_rule {
- paths = ["/login"]
- service = "${google_compute_backend_service.login.self_link}"
- }
-
- path_rule {
- paths = ["/static"]
- service = "${google_compute_backend_bucket.static.self_link}"
- }
- }
-
- test {
- service = "${google_compute_backend_service.home.self_link}"
- host = "hi.com"
- path = "/home"
- }
-}
-
-resource "google_compute_backend_service" "login" {
- name = "login-backend"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_backend_service" "home" {
- name = "home-backend"
- port_name = "http"
- protocol = "HTTP"
- timeout_sec = 10
-
- health_checks = ["${google_compute_http_health_check.default.self_link}"]
-}
-
-resource "google_compute_http_health_check" "default" {
- name = "test"
- request_path = "/"
- check_interval_sec = 1
- timeout_sec = 1
-}
-
-resource "google_compute_backend_bucket" "static" {
- name = "static-asset-backend-bucket"
- bucket_name = "${google_storage_bucket.static.name}"
- enable_cdn = true
-}
-
-resource "google_storage_bucket" "static" {
- name = "static-asset-bucket"
- location = "US"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `default_service` - (Required) The URL of the backend service or backend bucket to use when none
- of the given rules match. See the documentation for formatting the service/bucket
- URL
- [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#defaultService)
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `description` - (Optional) A brief description of this resource.
-
-* `host_rule` - (Optional) A list of host rules. See below for configuration
- options.
-
-* `path_matcher` - (Optional) A list of paths to match. See below for
- configuration options.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `test` - (Optional) The test to perform. See below for configuration options.
-
-The `host_rule` block supports: (This block can be defined multiple times).
-
-* `hosts` (Required) - A list of hosts to match against. See the documentation
- for formatting each host
- [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#hostRules.hosts)
-
-* `description` - (Optional) An optional description of the host rule.
-
-* `path_matcher` - (Required) The name of the `path_matcher` (defined below)
- to apply this host rule to.
-
-The `path_matcher` block supports: (This block can be defined multiple times)
-
-* `default_service` - (Required) The URL for the backend service or backend bucket to use if none
- of the given paths match. See the documentation for formatting the service/bucket
- URL [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatcher.defaultService)
-
-* `name` - (Required) The name of the `path_matcher` resource. Used by the
- `host_rule` block above.
-
-* `description` - (Optional) An optional description of the host rule.
-
-The `path_matcher.path_rule` sub-block supports: (This block can be defined
-multiple times)
-
-* `paths` - (Required) The list of paths to match against. See the
- documentation for formatting these [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatchers.pathRules.paths)
-
-* `service` - (Required) The URL for the backend service or backend bucket to use if any
- of the given paths match. See the documentation for formatting the service/bucket
- URL [here](https://cloud.google.com/compute/docs/reference/latest/urlMaps#pathMatcher.defaultService)
-
-The optional `test` block supports: (This block can be defined multiple times)
-
-* `service` - (Required) The backend service or backend bucket that should be matched by this test.
-
-* `host` - (Required) The host component of the URL being tested.
-
-* `path` - (Required) The path component of the URL being tested.
-
-* `description` - (Optional) An optional description of this test.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `fingerprint` - The unique fingerprint for this resource.
-
-* `id` - The GCE assigned ID of the resource.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown b/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown
deleted file mode 100644
index c0f100a03..000000000
--- a/website/source/docs/providers/google/r/compute_vpn_gateway.html.markdown
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_vpn_gateway"
-sidebar_current: "docs-google-compute-vpn-gateway"
-description: |-
- Manages a VPN Gateway in the GCE network
----
-
-# google\_compute\_vpn\_gateway
-
-Manages a VPN Gateway in the GCE network. For more info, read the
-[documentation](https://cloud.google.com/compute/docs/vpn).
-
-
-## Example Usage
-
-```hcl
-resource "google_compute_network" "network1" {
- name = "network1"
- ipv4_range = "10.120.0.0/16"
-}
-
-resource "google_compute_vpn_gateway" "target_gateway" {
- name = "vpn1"
- network = "${google_compute_network.network1.self_link}"
- region = "${var.region}"
-}
-
-resource "google_compute_address" "vpn_static_ip" {
- name = "vpn-static-ip"
- region = "${var.region}"
-}
-
-resource "google_compute_forwarding_rule" "fr_esp" {
- name = "fr-esp"
- region = "${var.region}"
- ip_protocol = "ESP"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "fr_udp500" {
- name = "fr-udp500"
- region = "${var.region}"
- ip_protocol = "UDP"
- port_range = "500"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "fr_udp4500" {
- name = "fr-udp4500"
- region = "${var.region}"
- ip_protocol = "UDP"
- port_range = "4500"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_vpn_tunnel" "tunnel1" {
- name = "tunnel1"
- region = "${var.region}"
- peer_ip = "15.0.0.120"
- shared_secret = "a secret message"
-
- target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}"
-
- depends_on = [
- "google_compute_forwarding_rule.fr_esp",
- "google_compute_forwarding_rule.fr_udp500",
- "google_compute_forwarding_rule.fr_udp4500",
- ]
-}
-
-resource "google_compute_route" "route1" {
- name = "route1"
- network = "${google_compute_network.network1.name}"
- dest_range = "15.0.0.0/24"
- priority = 1000
-
- next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-* `network` - (Required) The name or resource link to the network this VPN gateway
- is accepting traffic for. Changing this forces a new resource to be created.
-
-- - -
-
-* `description` - (Optional) A description of the resource.
- Changing this forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The region this gateway should sit in. If not specified,
- the project region will be used. Changing this forces a new resource to be
- created.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown b/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown
deleted file mode 100644
index 0fa53c89c..000000000
--- a/website/source/docs/providers/google/r/compute_vpn_tunnel.html.markdown
+++ /dev/null
@@ -1,141 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_compute_vpn_tunnel"
-sidebar_current: "docs-google-compute-vpn-tunnel"
-description: |-
- Manages a VPN Tunnel to the GCE network
----
-
-# google\_compute\_vpn\_tunnel
-
-Manages a VPN Tunnel to the GCE network. For more info, read the
-[documentation](https://cloud.google.com/compute/docs/vpn).
-
-## Example Usage
-
-```hcl
-resource "google_compute_network" "network1" {
- name = "network1"
-}
-
-resource "google_compute_subnetwork" "subnet1" {
- name = "subnet1"
- network = "${google_compute_network.network1.self_link}"
- ip_cidr_range = "10.120.0.0/16"
- region = "us-central1"
-}
-
-resource "google_compute_vpn_gateway" "target_gateway" {
- name = "vpn1"
- network = "${google_compute_network.network1.self_link}"
- region = "${google_compute_subnetwork.subnet1.region}"
-}
-
-resource "google_compute_address" "vpn_static_ip" {
- name = "vpn-static-ip"
- region = "${google_compute_subnetwork.subnet1.region}"
-}
-
-resource "google_compute_forwarding_rule" "fr_esp" {
- name = "fr-esp"
- ip_protocol = "ESP"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "fr_udp500" {
- name = "fr-udp500"
- ip_protocol = "UDP"
- port_range = "500-500"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_forwarding_rule" "fr_udp4500" {
- name = "fr-udp4500"
- ip_protocol = "UDP"
- port_range = "4500-4500"
- ip_address = "${google_compute_address.vpn_static_ip.address}"
- target = "${google_compute_vpn_gateway.target_gateway.self_link}"
-}
-
-resource "google_compute_vpn_tunnel" "tunnel1" {
- name = "tunnel1"
- peer_ip = "15.0.0.120"
- shared_secret = "a secret message"
-
- target_vpn_gateway = "${google_compute_vpn_gateway.target_gateway.self_link}"
-
- local_traffic_selector = ["${google_compute_subnetwork.subnet1.ip_cidr_range}"]
- remote_traffic_selector = ["172.16.0.0/12"]
-
- depends_on = [
- "google_compute_forwarding_rule.fr_esp",
- "google_compute_forwarding_rule.fr_udp500",
- "google_compute_forwarding_rule.fr_udp4500",
- ]
-}
-
-resource "google_compute_route" "route1" {
- name = "route1"
- network = "${google_compute_network.network1.name}"
- dest_range = "15.0.0.0/24"
- priority = 1000
-
- next_hop_vpn_tunnel = "${google_compute_vpn_tunnel.tunnel1.self_link}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by GCE. Changing
- this forces a new resource to be created.
-
-* `peer_ip` - (Required) The VPN gateway sitting outside of GCE. Changing this
- forces a new resource to be created.
-
-* `shared_secret` - (Required) A passphrase shared between the two VPN gateways.
- Changing this forces a new resource to be created.
-
-* `target_vpn_gateway` - (Required) A link to the VPN gateway sitting inside
- GCE. Changing this forces a new resource to be created.
-
-- - -
-
-* `description` - (Optional) A description of the resource. Changing this forces
- a new resource to be created.
-
-* `ike_version` - (Optional) Either version 1 or 2. Default is 2. Changing this
- forces a new resource to be created.
-
-* `local_traffic_selector` - (Optional) Specifies which CIDR ranges are
- announced to the VPN peer. Mandatory if the VPN gateway is attached to a
- custom subnetted network. Refer to Google documentation for more
- information.
-
-* `remote_traffic_selector` - (Optional) Specifies which CIDR ranges the VPN
- tunnel can route to the remote side. Mandatory if the VPN gateway is attached to a
- custom subnetted network. Refer to Google documentation for more
- information.
-
-* `router` - (Optional) Name of a Cloud Router in the same region
- to be used for dynamic routing. Refer to Google documentation for more
- information.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `region` - (Optional) The region this tunnel should sit in. If not specified,
- the project region will be used. Changing this forces a new resource to be
- created.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `detailed_status` - Information about the status of the VPN tunnel.
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/container_cluster.html.markdown b/website/source/docs/providers/google/r/container_cluster.html.markdown
deleted file mode 100644
index c0167dd2b..000000000
--- a/website/source/docs/providers/google/r/container_cluster.html.markdown
+++ /dev/null
@@ -1,188 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_container_cluster"
-sidebar_current: "docs-google-container-cluster"
-description: |-
- Creates a GKE cluster.
----
-
-# google\_container\_cluster
-
-!> **Warning:** Due to limitations of the API, all arguments except
-`node_version` are non-updateable. Changing any will cause recreation of the
-whole cluster!
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example usage
-
-```hcl
-resource "google_container_cluster" "primary" {
- name = "marcellus-wallace"
- zone = "us-central1-a"
- initial_node_count = 3
-
- additional_zones = [
- "us-central1-b",
- "us-central1-c",
- ]
-
- master_auth {
- username = "mr.yoda"
- password = "adoy.rm"
- }
-
- node_config {
- oauth_scopes = [
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/logging.write",
- "https://www.googleapis.com/auth/monitoring",
- ]
- }
-}
-```
-
-## Argument Reference
-
-* `initial_node_count` - (Required) The number of nodes to create in this
- cluster (not including the Kubernetes master).
-
-* `name` - (Required) The name of the cluster, unique within the project and
- zone.
-
-* `zone` - (Required) The zone that the master and the number of nodes specified
- in `initial_node_count` should be created in.
-
-- - -
-* `master_auth` - (Optional) The authentication information for accessing the
- Kubernetes master.
-
-* `additional_zones` - (Optional) If additional zones are configured, the number
- of nodes specified in `initial_node_count` is created in all specified zones.
-
-* `addons_config` - (Optional) The configuration for addons supported by Google
- Container Engine
-
-* `cluster_ipv4_cidr` - (Optional) The IP address range of the container pods in
- this cluster. Default is an automatically assigned CIDR.
-
-* `description` - (Optional) Description of the cluster.
-
-* `logging_service` - (Optional) The logging service that the cluster should
- write logs to. Available options include `logging.googleapis.com` and
- `none`. Defaults to `logging.googleapis.com`
-
-* `monitoring_service` - (Optional) The monitoring service that the cluster
- should write metrics to. Available options include
- `monitoring.googleapis.com` and `none`. Defaults to
- `monitoring.googleapis.com`
-
-* `network` - (Optional) The name or self_link of the Google Compute Engine
- network to which the cluster is connected
-
-* `node_config` - (Optional) The machine type and image to use for all nodes in
- this cluster
-
-* `node_pool` - (Optional) List of node pools associated with this cluster.
-
-* `node_version` - (Optional) The Kubernetes version on the nodes. Also affects
- the initial master version on cluster creation. Updates affect nodes only.
- Defaults to the default version set by GKE which is not necessarily the latest
- version.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `subnetwork` - (Optional) The name of the Google Compute Engine subnetwork in
-  which the cluster's instances are launched
-
-**Master Auth** supports the following arguments:
-
-* `password` - (Required) The password to use for HTTP basic authentication when accessing
- the Kubernetes master endpoint
-
-* `username` - (Required) The username to use for HTTP basic authentication when accessing
- the Kubernetes master endpoint
-
-**Node Config** supports the following arguments:
-
-* `machine_type` - (Optional) The name of a Google Compute Engine machine type.
- Defaults to `n1-standard-1`.
-
-* `disk_size_gb` - (Optional) Size of the disk attached to each node, specified
- in GB. The smallest allowed disk size is 10GB. Defaults to 100GB.
-
-* `local_ssd_count` - (Optional) The amount of local SSD disks that will be
- attached to each cluster node. Defaults to 0.
-
-* `oauth_scopes` - (Optional) The set of Google API scopes to be made available
- on all of the node VMs under the "default" service account. These can be
- either FQDNs, or scope aliases. The following scopes are necessary to ensure
- the correct functioning of the cluster:
-
- * `compute-rw` (`https://www.googleapis.com/auth/compute`)
- * `storage-ro` (`https://www.googleapis.com/auth/devstorage.read_only`)
- * `logging-write` (`https://www.googleapis.com/auth/logging.write`),
- if `logging_service` points to Google
- * `monitoring` (`https://www.googleapis.com/auth/monitoring`),
- if `monitoring_service` points to Google
-
-* `service_account` - (Optional) The service account to be used by the Node VMs.
- If not specified, the "default" service account is used.
-
-* `metadata` - (Optional) The metadata key/value pairs assigned to instances in
- the cluster.
-
-* `image_type` - (Optional) The image type to use for this node.
-
-**Addons Config** supports the following addons:
-
-* `http_load_balancing` - (Optional) The status of the HTTP Load Balancing
- add-on. It is enabled by default; set `disabled = true` to disable.
-* `horizontal_pod_autoscaling` - (Optional) The status of the Horizontal Pod
- Autoscaling addon. It is enabled by default; set `disabled = true` to
- disable.
-
-This example `addons_config` disables both addons:
-
-```
-addons_config {
- http_load_balancing {
- disabled = true
- }
- horizontal_pod_autoscaling {
- disabled = true
- }
-}
-```
-
-**Node Pool** supports the following arguments:
-
-* `initial_node_count` - (Required) The initial node count for the pool.
-
-* `name` - (Optional) The name of the node pool. If left blank, Terraform will
- auto-generate a unique name.
-
-* `name_prefix` - (Optional) Creates a unique name for the node pool beginning
- with the specified prefix. Conflicts with `name`.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `endpoint` - The IP address of this cluster's Kubernetes master
-
-* `instance_group_urls` - List of instance group URLs which have been assigned
- to the cluster
-
-* `master_auth.client_certificate` - Base64 encoded public certificate
- used by clients to authenticate to the cluster endpoint.
-
-* `master_auth.client_key` - Base64 encoded private key used by clients
- to authenticate to the cluster endpoint
-
-* `master_auth.cluster_ca_certificate` - Base64 encoded public certificate
- that is the root of trust for the cluster
diff --git a/website/source/docs/providers/google/r/container_node_pool.html.markdown b/website/source/docs/providers/google/r/container_node_pool.html.markdown
deleted file mode 100644
index 386d912ee..000000000
--- a/website/source/docs/providers/google/r/container_node_pool.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_container_node_pool"
-sidebar_current: "docs-google-container-node-pool"
-description: |-
- Manages a GKE NodePool resource.
----
-
-# google\_container\_node\_pool
-
-Manages a Node Pool resource within GKE. For more information see
-[the official documentation](https://cloud.google.com/container-engine/docs/node-pools)
-and
-[API](https://cloud.google.com/container-engine/reference/rest/v1/projects.zones.clusters.nodePools).
-
-## Example usage
-
-```hcl
-resource "google_container_node_pool" "np" {
- name = "my-node-pool"
- zone = "us-central1-a"
- cluster = "${google_container_cluster.primary.name}"
- initial_node_count = 3
-}
-
-resource "google_container_cluster" "primary" {
- name = "marcellus-wallace"
- zone = "us-central1-a"
- initial_node_count = 3
-
- additional_zones = [
- "us-central1-b",
- "us-central1-c",
- ]
-
- master_auth {
- username = "mr.yoda"
- password = "adoy.rm"
- }
-
- node_config {
- oauth_scopes = [
- "https://www.googleapis.com/auth/compute",
- "https://www.googleapis.com/auth/devstorage.read_only",
- "https://www.googleapis.com/auth/logging.write",
- "https://www.googleapis.com/auth/monitoring",
- ]
- }
-}
-```
-
-## Argument Reference
-
-* `zone` - (Required) The zone in which the cluster resides.
-
-* `cluster` - (Required) The cluster to create the node pool for.
-
-* `initial_node_count` - (Required) The initial node count for the pool.
-
-- - -
-
-* `project` - (Optional) The project in which to create the node pool. If blank,
- the provider-configured project will be used.
-
-* `name` - (Optional) The name of the node pool. If left blank, Terraform will
- auto-generate a unique name.
-
-* `name_prefix` - (Optional) Creates a unique name for the node pool beginning
- with the specified prefix. Conflicts with `name`.
diff --git a/website/source/docs/providers/google/r/dns_managed_zone.markdown b/website/source/docs/providers/google/r/dns_managed_zone.markdown
deleted file mode 100644
index 5f5bc95ad..000000000
--- a/website/source/docs/providers/google/r/dns_managed_zone.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_dns_managed_zone"
-sidebar_current: "docs-google-dns-managed-zone"
-description: |-
- Manages a zone within Google Cloud DNS.
----
-
-# google\_dns\_managed_zone
-
-Manages a zone within Google Cloud DNS.
-
-## Example Usage
-
-```hcl
-resource "google_dns_managed_zone" "prod" {
- name = "prod-zone"
- dns_name = "prod.mydomain.com."
- description = "Production DNS zone"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `dns_name` - (Required) The DNS name of this zone, e.g. "terraform.io.". Must end with a period.
-
-* `name` - (Required) A unique name for the resource, required by GCE.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `description` - (Optional) A textual description field. Defaults to 'Managed by Terraform'.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `name_servers` - The list of nameservers that will be authoritative for this
- domain. Use NS records to redirect from your DNS provider to these names,
- thus making Google Cloud DNS authoritative for this zone.
diff --git a/website/source/docs/providers/google/r/dns_record_set.markdown b/website/source/docs/providers/google/r/dns_record_set.markdown
deleted file mode 100644
index dcf33371c..000000000
--- a/website/source/docs/providers/google/r/dns_record_set.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_dns_record_set"
-sidebar_current: "docs-google-dns-record-set"
-description: |-
- Manages a set of DNS records within Google Cloud DNS.
----
-
-# google\_dns\_record\_set
-
-Manages a set of DNS records within Google Cloud DNS.
-
-## Example Usage
-
-This example is the common case of binding a DNS name to the ephemeral IP of a new instance:
-
-```hcl
-resource "google_compute_instance" "frontend" {
- name = "frontend"
- machine_type = "g1-small"
- zone = "us-central1-b"
-
- disk {
- image = "debian-cloud/debian-8"
- }
-
- network_interface {
- network = "default"
- access_config = {}
- }
-}
-
-resource "google_dns_managed_zone" "prod" {
- name = "prod-zone"
- dns_name = "prod.mydomain.com."
-}
-
-resource "google_dns_record_set" "frontend" {
- name = "frontend.${google_dns_managed_zone.prod.dns_name}"
- type = "A"
- ttl = 300
-
- managed_zone = "${google_dns_managed_zone.prod.name}"
-
- rrdatas = ["${google_compute_instance.frontend.network_interface.0.access_config.0.assigned_nat_ip}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `managed_zone` - (Required) The name of the zone in which this record set will
- reside.
-
-* `name` - (Required) The DNS name this record set will apply to.
-
-* `rrdatas` - (Required) The string data for the records in this record set
- whose meaning depends on the DNS type.
-
-* `ttl` - (Required) The time-to-live of this record set (seconds).
-
-* `type` - (Required) The DNS record set type.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
diff --git a/website/source/docs/providers/google/r/google_project.html.markdown b/website/source/docs/providers/google/r/google_project.html.markdown
deleted file mode 100755
index 8994c8eec..000000000
--- a/website/source/docs/providers/google/r/google_project.html.markdown
+++ /dev/null
@@ -1,103 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_project"
-sidebar_current: "docs-google-project"
-description: |-
- Allows management of a Google Cloud Platform project.
----
-
-# google\_project
-
-Allows creation and management of a Google Cloud Platform project.
-
-Projects created with this resource must be associated with an Organization.
-See the [Organization documentation](https://cloud.google.com/resource-manager/docs/quickstarts) for more details.
-
-The service account used to run Terraform when creating a `google_project`
-resource must have `roles/resourcemanager.projectCreator`. See the
-[Access Control for Organizations Using IAM](https://cloud.google.com/resource-manager/docs/access-control-org)
-doc for more information.
-
-Note that prior to 0.8.5, `google_project` functioned like a data source,
-meaning any project referenced by it had to be created and managed outside
-Terraform. As of 0.8.5, `google_project` functions like any other Terraform
-resource, with Terraform creating and managing the project. To replicate the old
-behavior, either:
-
-* Use the project ID directly in whatever is referencing the project, using the
- [google_project_iam_policy](/docs/providers/google/r/google_project_iam_policy.html)
- to replace the old `policy_data` property.
-* Use the [import](/docs/import/usage.html) functionality
- to import your pre-existing project into Terraform, where it can be referenced and
- used just like always, keeping in mind that Terraform will attempt to undo any changes
- made outside Terraform.
-
-~> It's important to note that any project resources that were added to your Terraform config
-prior to 0.8.5 will continue to function as they always have, and will not be managed by
-Terraform. Only newly added projects are affected.
-
-## Example Usage
-
-```hcl
-resource "google_project" "my_project" {
- project_id = "your-project-id"
- org_id = "1234567"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project_id` - (Optional) The project ID.
- Changing this forces a new project to be created. If this attribute is not
- set, `id` must be set. As `id` is deprecated, consider this attribute
- required. If you are using `project_id` and creating a new project, the
- `org_id` and `name` attributes are also required.
-
-* `id` - (Deprecated) The project ID.
- This attribute has unexpected behaviour and probably does not work
- as users would expect; it has been deprecated, and will be removed in future
- versions of Terraform. The `project_id` attribute should be used instead. See
- [below](#id-field) for more information about its behaviour.
-
-* `org_id` - (Optional) The numeric ID of the organization this project belongs to.
- This is required if you are creating a new project.
- Changing this forces a new project to be created.
-
-* `billing_account` - (Optional) The alphanumeric ID of the billing account this project
- belongs to. The user or service account performing this operation with Terraform
- must have Billing Account Administrator privileges (`roles/billing.admin`) in
- the organization. See [Google Cloud Billing API Access Control](https://cloud.google.com/billing/v1/how-tos/access-control)
- for more details.
-
-* `name` - (Optional) The display name of the project.
- This is required if you are creating a new project.
-
-* `skip_delete` - (Optional) If true, the Terraform resource can be deleted
- without deleting the Project via the Google API.
-
-* `policy_data` - (Deprecated) The IAM policy associated with the project.
- This argument is no longer supported, and will be removed in a future version
- of Terraform. It should be replaced with a `google_project_iam_policy` resource.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `number` - The numeric identifier of the project.
-* `policy_etag` - (Deprecated) The etag of the project's IAM policy, used to
- determine if the IAM policy has changed. Please use `google_project_iam_policy`'s
- `etag` property instead; future versions of Terraform will remove the `policy_etag`
- attribute
-
-## ID Field
-
-In versions of Terraform prior to 0.8.5, `google_project` resources used an `id` field in
-config files to specify the project ID. Unfortunately, due to limitations in Terraform,
-this field always looked empty to Terraform. Terraform fell back on using the project
-the Google Cloud provider is configured with. If you're using the `id` field in your
-configurations, know that it is being ignored, and its value will always be seen as the
-ID of the project being used to authenticate Terraform's requests. You should move to the
-`project_id` field as soon as possible.
diff --git a/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown b/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown
deleted file mode 100644
index b7c3e654d..000000000
--- a/website/source/docs/providers/google/r/google_project_iam_policy.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_project_iam_policy"
-sidebar_current: "docs-google-project-iam-policy"
-description: |-
- Allows management of an IAM policy for a Google Cloud Platform project.
----
-
-# google\_project\_iam\_policy
-
-Allows creation and management of an IAM policy for an existing Google Cloud
-Platform project.
-
-~> **Be careful!** You can accidentally lock yourself out of your project
- using this resource. Proceed with caution.
-
-## Example Usage
-
-```hcl
-resource "google_project_iam_policy" "project" {
- project = "your-project-id"
- policy_data = "${data.google_iam_policy.admin.policy_data}"
-}
-
-data "google_iam_policy" "admin" {
- binding {
- role = "roles/editor"
-
- members = [
- "user:jane@example.com",
- ]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project` - (Required) The project ID.
- Changing this forces a new project to be created.
-
-* `policy_data` - (Required) The `google_iam_policy` data source that represents
- the IAM policy that will be applied to the project. The policy will be
- merged with any existing policy applied to the project.
-
- Changing this updates the policy.
-
- Deleting this removes the policy, but leaves the original project policy
- intact. If there are overlapping `binding` entries between the original
- project policy and the data source policy, they will be removed.
-
-* `authoritative` - (Optional) A boolean value indicating if this policy
- should overwrite any existing IAM policy on the project. When set to true,
- **any policies not in your config file will be removed**. This can **lock
- you out** of your project until an Organization Administrator grants you
- access again, so please exercise caution. If this argument is `true` and you
- want to delete the resource, you must set the `disable_project` argument to
- `true`, acknowledging that the project will be inaccessible to anyone but the
- Organization Admins, as it will no longer have an IAM policy.
-
-* `disable_project` - (Optional) A boolean value that must be set to `true`
- if you want to delete a `google_project_iam_policy` that is authoritative.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `etag` - (Computed) The etag of the project's IAM policy.
-
-* `restore_policy` - (Computed) The IAM policy that will be restored when a
- non-authoritative policy resource is deleted.
diff --git a/website/source/docs/providers/google/r/google_project_services.html.markdown b/website/source/docs/providers/google/r/google_project_services.html.markdown
deleted file mode 100644
index d6698f2a1..000000000
--- a/website/source/docs/providers/google/r/google_project_services.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_project_services"
-sidebar_current: "docs-google-project-services"
-description: |-
- Allows management of API services for a Google Cloud Platform project.
----
-
-# google\_project\_services
-
-Allows management of enabled API services for an existing Google Cloud
-Platform project. Services in an existing project that are not defined
-in the config will be removed.
-
-For a list of services available, visit the
-[API library page](https://console.cloud.google.com/apis/library) or run `gcloud service-management list`.
-
-## Example Usage
-
-```hcl
-resource "google_project_services" "project" {
- project = "your-project-id"
- services = ["iam.googleapis.com", "cloudresourcemanager.googleapis.com"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project` - (Required) The project ID.
- Changing this forces a new project to be created.
-
-* `services` - (Required) The list of services that are enabled. Supports
- update.
diff --git a/website/source/docs/providers/google/r/google_service_account.html.markdown b/website/source/docs/providers/google/r/google_service_account.html.markdown
deleted file mode 100644
index c3cd1c899..000000000
--- a/website/source/docs/providers/google/r/google_service_account.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_service_account"
-sidebar_current: "docs-google-service-account"
-description: |-
- Allows management of a Google Cloud Platform service account.
----
-
-# google\_service\_account
-
-Allows management of a [Google Cloud Platform service account](https://cloud.google.com/compute/docs/access/service-accounts)
-
-## Example Usage
-
-This snippet creates a service account, then gives it objectViewer
-permission in a project.
-
-```hcl
-resource "google_service_account" "object_viewer" {
- account_id = "object-viewer"
- display_name = "Object viewer"
-}
-
-resource "google_project" "my_project" {
- id = "your-project-id"
- policy_data = "${data.google_iam_policy.admin.policy_data}"
-}
-
-data "google_iam_policy" "admin" {
- binding {
- role = "roles/storage.objectViewer"
-
- members = [
- "serviceAccount:${google_service_account.object_viewer.email}",
- ]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `account_id` - (Required) The service account ID.
- Changing this forces a new service account to be created.
-
-* `display_name` - (Optional) The display name for the service account.
- Can be updated without creating a new resource.
-
-* `project` - (Optional) The project that the service account will be created in.
- Defaults to the provider project configuration.
-
-* `policy_data` - (Optional) The `google_iam_policy` data source that represents
- the IAM policy that will be applied to the service account. The policy will be
- merged with any existing policy.
-
- Changing this updates the policy.
-
- Deleting this removes the policy declared in Terraform. Any policy bindings
- associated with the project before Terraform was used are not deleted.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `email` - The e-mail address of the service account. This value
- should be referenced from any `google_iam_policy` data sources
- that would grant the service account privileges.
-
-* `name` - The fully-qualified name of the service account.
-
-* `unique_id` - The unique id of the service account.
diff --git a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown b/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
deleted file mode 100644
index e5cf641d2..000000000
--- a/website/source/docs/providers/google/r/pubsub_subscription.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_pubsub_subscription"
-sidebar_current: "docs-google-pubsub-subscription"
-description: |-
- Creates a subscription in Google's pubsub queueing system
----
-
-# google\_pubsub\_subscription
-
-Creates a subscription in Google's pubsub queueing system. For more information see
-[the official documentation](https://cloud.google.com/pubsub/docs) and
-[API](https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions).
-
-
-## Example Usage
-
-```hcl
-resource "google_pubsub_subscription" "default" {
- name = "default-subscription"
- topic = "default-topic"
-
- ack_deadline_seconds = 20
-
- push_config {
- push_endpoint = "https://example.com/push"
-
- attributes {
- x-goog-version = "v1"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by pubsub.
- Changing this forces a new resource to be created.
-
-* `topic` - (Required) A topic to bind this subscription to, required by pubsub.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `ack_deadline_seconds` - (Optional) The maximum number of seconds a
- subscriber has to acknowledge a received message, otherwise the message is
- redelivered. Changing this forces a new resource to be created.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `push_config` - (Optional) Block configuration for push options. More
- configuration options are detailed below.
-
-The optional `push_config` block supports:
-
-* `push_endpoint` - (Optional) The URL of the endpoint to which messages should
- be pushed. Changing this forces a new resource to be created.
-
-* `attributes` - (Optional) Key-value pairs of API supported attributes used
- to control aspects of the message delivery. Currently, only
- `x-goog-version` is supported, which controls the format of the data
- delivery. For more information, read [the API docs
- here](https://cloud.google.com/pubsub/reference/rest/v1/projects.subscriptions#PushConfig.FIELDS.attributes).
- Changing this forces a new resource to be created.
-
-## Attributes Reference
-
-* `path` - Path of the subscription in the format `projects/{project}/subscriptions/{sub}`
diff --git a/website/source/docs/providers/google/r/pubsub_topic.html.markdown b/website/source/docs/providers/google/r/pubsub_topic.html.markdown
deleted file mode 100644
index ceb3aebc2..000000000
--- a/website/source/docs/providers/google/r/pubsub_topic.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_pubsub_topic"
-sidebar_current: "docs-google-pubsub-topic"
-description: |-
- Creates a topic in Google's pubsub queueing system
----
-
-# google\_pubsub\_topic
-
-Creates a topic in Google's pubsub queueing system. For more information see
-[the official documentation](https://cloud.google.com/pubsub/docs) and
-[API](https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.topics).
-
-
-## Example Usage
-
-```hcl
-resource "google_pubsub_topic" "default" {
- name = "default-topic"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the resource, required by pubsub.
- Changing this forces a new resource to be created.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
diff --git a/website/source/docs/providers/google/r/sql_database.html.markdown b/website/source/docs/providers/google/r/sql_database.html.markdown
deleted file mode 100644
index d4c6b9802..000000000
--- a/website/source/docs/providers/google/r/sql_database.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_sql_database"
-sidebar_current: "docs-google-sql-database"
-description: |-
- Creates a new SQL database in Google Cloud SQL.
----
-
-# google\_sql\_database
-
-Creates a new Google SQL Database on a Google SQL Database Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/admin-api/v1beta4/databases).
-
-## Example Usage
-
-Example creating a SQL Database.
-
-```hcl
-resource "google_sql_database_instance" "master" {
- name = "master-instance"
-
- settings {
- tier = "D0"
- }
-}
-
-resource "google_sql_database" "users" {
- name = "image-store-bucket"
- instance = "${google_sql_database_instance.master.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the database.
-
-* `instance` - (Required) The name of containing instance.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
diff --git a/website/source/docs/providers/google/r/sql_database_instance.html.markdown b/website/source/docs/providers/google/r/sql_database_instance.html.markdown
deleted file mode 100644
index f9fdc6c4d..000000000
--- a/website/source/docs/providers/google/r/sql_database_instance.html.markdown
+++ /dev/null
@@ -1,196 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_sql_database_instance"
-sidebar_current: "docs-google-sql-database-instance"
-description: |-
- Creates a new SQL database instance in Google Cloud SQL.
----
-
-# google\_sql\_database\_instance
-
-Creates a new Google SQL Database Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/admin-api/v1beta4/instances).
-
-~> **NOTE on `google_sql_database_instance`:** - Second-generation instances include a
-default 'root'@'%' user with no password. This user will be deleted by Terraform on
-instance creation. You should use a `google_sql_user` to define a customer user with
-a restricted host and strong password.
-
-
-## Example Usage
-
-Example creating a SQL Database.
-
-```hcl
-resource "google_sql_database_instance" "master" {
- name = "master-instance"
-
- settings {
- tier = "D0"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region the instance will sit in. Note, this does
- not line up with the Google Compute Engine (GCE) regions - your options are
- `us-central`, `asia-west1`, `europe-west1`, and `us-east1`.
-
-* `settings` - (Required) The settings to use for the database. The
- configuration is detailed below.
-
-- - -
-
-* `database_version` - (Optional, Default: `MYSQL_5_6`) The MySQL version to
- use. Can be either `MYSQL_5_6` or `MYSQL_5_7` for second-generation
- instances, or `MYSQL_5_5` or `MYSQL_5_6` for first-generation instances.
- See Google's [Second Generation Capabilities](https://cloud.google.com/sql/docs/1st-2nd-gen-differences)
- for more information.
-
-* `name` - (Optional, Computed) The name of the instance. If the name is left
- blank, Terraform will randomly generate one when the instance is first
- created. This is done because after a name is used, it cannot be reused for
- up to [one week](https://cloud.google.com/sql/docs/delete-instance).
-
-* `master_instance_name` - (Optional) The name of the instance that will act as
- the master in the replication setup. Note, this requires the master to have
- `binary_log_enabled` set, as well as existing backups.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `replica_configuration` - (Optional) The configuration for replication. The
- configuration is detailed below.
-
-The required `settings` block supports:
-
-* `tier` - (Required) The machine tier (First Generation) or type (Second Generation) to use. See
- [tiers](https://cloud.google.com/sql/docs/admin-api/v1beta4/tiers) for more details and
- supported versions.
-
-* `activation_policy` - (Optional) This specifies when the instance should be
- active. Can be either `ALWAYS`, `NEVER` or `ON_DEMAND`.
-
-* `authorized_gae_applications` - (Optional) A list of Google App Engine (GAE)
- project names that are allowed to access this instance.
-
-* `crash_safe_replication` - (Optional) Specific to read instances, indicates
- when crash-safe replication flags are enabled.
-
-* `disk_autoresize` - (Optional, Second Generation, Default: `true`) Configuration to increase storage size automatically.
-
-* `disk_size` - (Optional, Second Generation, Default: `10`) The size of data disk, in GB. Size of a running instance cannot be reduced but can be increased.
-
-* `disk_type` - (Optional, Second Generation, Default: `PD_SSD`) The type of data disk: PD_SSD or PD_HDD.
-
-* `pricing_plan` - (Optional, First Generation) Pricing plan for this instance, can be one of
- `PER_USE` or `PACKAGE`.
-
-* `replication_type` - (Optional) Replication type for this instance, can be one
- of `ASYNCHRONOUS` or `SYNCHRONOUS`.
-
-The optional `settings.database_flags` sublist supports:
-
-* `name` - (Optional) Name of the flag.
-
-* `value` - (Optional) Value of the flag.
-
-The optional `settings.backup_configuration` subblock supports:
-
-* `binary_log_enabled` - (Optional) True iff binary logging is enabled. If
- `logging` is false, this must be as well.
-
-* `enabled` - (Optional) True iff backup configuration is enabled.
-
-* `start_time` - (Optional) `HH:MM` format time indicating when backup
- configuration starts.
-
-The optional `settings.ip_configuration` subblock supports:
-
-* `ipv4_enabled` - (Optional) True iff the instance should be assigned an IP
- address.
-
-* `require_ssl` - (Optional) True iff mysqld should default to `REQUIRE X509`
- for users connecting over IP.
-
-The optional `settings.ip_configuration.authorized_networks[]` sublist supports:
-
-* `expiration_time` - (Optional) The [RFC 3339](https://tools.ietf.org/html/rfc3339)
- formatted date time string indicating when this whitelist expires.
-
-* `name` - (Optional) A name for this whitelist entry.
-
-* `value` - (Optional) A CIDR notation IPv4 or IPv6 address that is allowed to
- access this instance. Must be set even if other two attributes are not for
- the whitelist to become active.
-
-The optional `settings.location_preference` subblock supports:
-
-* `follow_gae_application` - (Optional) A GAE application whose zone to remain
- in. Must be in the same region as this instance.
-
-* `zone` - (Optional) The preferred compute engine
- [zone](https://cloud.google.com/compute/docs/zones?hl=en).
-
-The optional `settings.maintenance_window` subblock for Second Generation
-instances declares a one-hour [maintenance window](https://cloud.google.com/sql/docs/instance-settings?hl=en#maintenance-window-2ndgen)
-when an Instance can automatically restart to apply updates. It supports:
-
-* `day` - (Optional) Day of week (`1-7`), starting on Monday
-
-* `hour` - (Optional) Hour of day (`0-23`), ignored if `day` not set
-
-* `update_track` - (Optional) Receive updates earlier (`canary`) or later
-(`stable`)
-
-The optional `replica_configuration` block must have `master_instance_name` set
-to work, cannot be updated, and supports:
-
-* `ca_certificate` - (Optional) PEM representation of the trusted CA's x509
- certificate.
-
-* `client_certificate` - (Optional) PEM representation of the slave's x509
- certificate.
-
-* `client_key` - (Optional) PEM representation of the slave's private key. The
- corresponding public key is encoded in the `client_certificate`.
-
-* `connect_retry_interval` - (Optional, Default: 60) The number of seconds
- between connect retries.
-
-* `dump_file_path` - (Optional) Path to a SQL file in GCS from which slave
- instances are created. Format is `gs://bucket/filename`.
-
-* `failover_target` - (Optional) Specifies if the replica is the failover target.
- If the field is set to true the replica will be designated as a failover replica.
- If the master instance fails, the replica instance will be promoted as
- the new master instance.
-
-* `master_heartbeat_period` - (Optional) Time in ms between replication
- heartbeats.
-
-* `password` - (Optional) Password for the replication connection.
-
-* `sslCipher` - (Optional) Permissible ciphers for use in SSL encryption.
-
-* `username` - (Optional) Username for replication connection.
-
-* `verify_server_certificate` - (Optional) True iff the master's common name
- value is checked during the SSL handshake.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `ip_address.0.ip_address` - The IPv4 address assigned.
-
-* `ip_address.0.time_to_retire` - The time this IP address will be retired, in RFC
- 3339 format.
-
-* `self_link` - The URI of the created resource.
-
-* `settings.version` - Used to make sure changes to the `settings` block are
- atomic.
diff --git a/website/source/docs/providers/google/r/sql_user.html.markdown b/website/source/docs/providers/google/r/sql_user.html.markdown
deleted file mode 100644
index 0b35fa1b8..000000000
--- a/website/source/docs/providers/google/r/sql_user.html.markdown
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_sql_user"
-sidebar_current: "docs-google-sql-user"
-description: |-
- Creates a new SQL user in Google Cloud SQL.
----
-
-# google\_sql\_user
-
-Creates a new Google SQL User on a Google SQL User Instance. For more information, see the [official documentation](https://cloud.google.com/sql/), or the [JSON API](https://cloud.google.com/sql/docs/admin-api/v1beta4/users).
-
-~> **Note:** All arguments including the username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html). Passwords will not be retrieved when running
-"terraform import".
-
-## Example Usage
-
-Example creating a SQL User.
-
-```hcl
-resource "google_sql_database_instance" "master" {
- name = "master-instance"
-
- settings {
- tier = "D0"
- }
-}
-
-resource "google_sql_user" "users" {
- name = "me"
- instance = "${google_sql_database_instance.master.name}"
- host = "me.com"
- password = "changeme"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `host` - (Required) The host the user can connect from. Can be an IP address.
- Changing this forces a new resource to be created.
-
-* `instance` - (Required) The name of the Cloud SQL instance. Changing this
- forces a new resource to be created.
-
-* `name` - (Required) The name of the user. Changing this forces a new resource
- to be created.
-
-* `password` - (Required) The user's password. Can be updated.
-
-- - -
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
-
-## Import Format
-
-Importing an SQL user is formatted as:
-
-```bash
-terraform import google_sql_user.$RESOURCENAME $INSTANCENAME/$SQLUSERNAME
-```
-
-For example, the sample at the top of this page could be imported with:
-
-```bash
-terraform import google_sql_user.users master-instance/me
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/google/r/storage_bucket.html.markdown b/website/source/docs/providers/google/r/storage_bucket.html.markdown
deleted file mode 100644
index 8a2ff2577..000000000
--- a/website/source/docs/providers/google/r/storage_bucket.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_storage_bucket"
-sidebar_current: "docs-google-storage-bucket"
-description: |-
- Creates a new bucket in Google Cloud Storage.
----
-
-# google\_storage\_bucket
-
-Creates a new bucket in Google cloud storage service (GCS).
-Once a bucket has been created, its location can't be changed.
-[ACLs](https://cloud.google.com/storage/docs/access-control/lists) can be applied using the `google_storage_bucket_acl` resource.
-For more information see
-[the official documentation](https://cloud.google.com/storage/docs/overview)
-and
-[API](https://cloud.google.com/storage/docs/json_api/v1/buckets).
-
-
-## Example Usage
-
-Example creating a private bucket in standard storage, in the EU region.
-
-```hcl
-resource "google_storage_bucket" "image-store" {
- name = "image-store-bucket"
- location = "EU"
-
- website {
- main_page_suffix = "index.html"
- not_found_page = "404.html"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the bucket.
-
-- - -
-
-* `force_destroy` - (Optional, Default: false) When deleting a bucket, this
- boolean option will delete all contained objects. If you try to delete a
- bucket that contains objects, Terraform will fail that run.
-
-* `location` - (Optional, Default: 'US') The [GCS location](https://cloud.google.com/storage/docs/bucket-locations)
-
-
-* `predefined_acl` - (Optional, Deprecated) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control#predefined-acl) to apply. Please switch
-to `google_storage_bucket_acl.predefined_acl`.
-
-* `project` - (Optional) The project in which the resource belongs. If it
- is not provided, the provider project is used.
-
-* `storage_class` - (Optional) The [Storage Class](https://cloud.google.com/storage/docs/storage-classes) of the new bucket. Supported values include: `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`.
-
-* `website` - (Optional) Configuration if the bucket acts as a website. Structure is documented below.
-
-* `cors` - (Optional) The bucket's [Cross-Origin Resource Sharing (CORS)](https://www.w3.org/TR/cors/) configuration. Multiple blocks of this type are permitted. Structure is documented below.
-
-The `website` block supports:
-
-* `main_page_suffix` - (Optional) Behaves as the bucket's directory index where
- missing objects are treated as potential directories.
-
-* `not_found_page` - (Optional) The custom object to return when a requested
- resource is not found.
-
-The `cors` block supports:
-
-* `origin` - (Optional) The list of [Origins](https://tools.ietf.org/html/rfc6454) eligible to receive CORS response headers. Note: "*" is permitted in the list of origins, and means "any Origin".
-
-* `method` - (Optional) The list of HTTP methods on which to include CORS response headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list of methods, and means "any method".
-
-* `response_header` - (Optional) The list of HTTP headers other than the [simple response headers](https://www.w3.org/TR/cors/#simple-response-header) to give permission for the user-agent to share across domains.
-
-* `max_age_seconds` - (Optional) The value, in seconds, to return in the [Access-Control-Max-Age header](https://www.w3.org/TR/cors/#access-control-max-age-response-header) used in preflight responses.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `self_link` - The URI of the created resource.
-
-* `url` - The base URL of the bucket, in the format `gs://`.
diff --git a/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown b/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown
deleted file mode 100644
index 4d8563108..000000000
--- a/website/source/docs/providers/google/r/storage_bucket_acl.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_storage_bucket_acl"
-sidebar_current: "docs-google-storage-bucket-acl"
-description: |-
- Creates a new bucket ACL in Google Cloud Storage.
----
-
-# google\_storage\_bucket\_acl
-
-Creates a new bucket ACL in Google cloud storage service (GCS). For more information see
-[the official documentation](https://cloud.google.com/storage/docs/access-control/lists)
-and
-[API](https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls).
-
-## Example Usage
-
-Example creating an ACL on a bucket with one owner, and one reader.
-
-```hcl
-resource "google_storage_bucket" "image-store" {
- name = "image-store-bucket"
- location = "EU"
-}
-
-resource "google_storage_bucket_acl" "image-store-acl" {
- bucket = "${google_storage_bucket.image-store.name}"
-
- role_entity = [
- "OWNER:user-my.email@gmail.com",
- "READER:group-mygroup",
- ]
-}
-```
-
-## Argument Reference
-
-* `bucket` - (Required) The name of the bucket it applies to.
-
-- - -
-
-* `predefined_acl` - (Optional) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control/lists#predefined-acl) to apply. Must be set if `role_entity` is not.
-
-* `role_entity` - (Optional) List of role/entity pairs in the form `ROLE:entity`. See [GCS Bucket ACL documentation](https://cloud.google.com/storage/docs/json_api/v1/bucketAccessControls) for more details. Must be set if `predefined_acl` is not.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
diff --git a/website/source/docs/providers/google/r/storage_bucket_object.html.markdown b/website/source/docs/providers/google/r/storage_bucket_object.html.markdown
deleted file mode 100644
index d2cfe1ed6..000000000
--- a/website/source/docs/providers/google/r/storage_bucket_object.html.markdown
+++ /dev/null
@@ -1,74 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_storage_bucket_object"
-sidebar_current: "docs-google-storage-bucket-object"
-description: |-
- Creates a new object inside a specified bucket
----
-
-# google\_storage\_bucket\_object
-
-Creates a new object inside an existing bucket in Google cloud storage service (GCS).
-[ACLs](https://cloud.google.com/storage/docs/access-control/lists) can be applied using the `google_storage_object_acl` resource.
- For more information see
-[the official documentation](https://cloud.google.com/storage/docs/key-terms#objects)
-and
-[API](https://cloud.google.com/storage/docs/json_api/v1/objects).
-
-
-## Example Usage
-
-Example creating a public object in an existing `image-store` bucket.
-
-```hcl
-resource "google_storage_bucket_object" "picture" {
- name = "butterfly01"
- source = "/images/nature/garden-tiger-moth.jpg"
- bucket = "image-store"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `bucket` - (Required) The name of the containing bucket.
-
-* `name` - (Required) The name of the object.
-
-One of the following is required:
-
-* `content` - (Optional) Data as `string` to be uploaded. Must be defined if
- `source` is not.
-
-* `source` - (Optional) A path to the data you want to upload. Must be defined
- if `content` is not.
-
-- - -
-
-* `cache_control` - (Optional) [Cache-Control](https://tools.ietf.org/html/rfc7234#section-5.2)
- directive to specify caching behavior of object data. If omitted and object is accessible to all anonymous users, the default will be public, max-age=3600
-
-* `content_disposition` - (Optional) [Content-Disposition](https://tools.ietf.org/html/rfc6266) of the object data.
-
-* `content_encoding` - (Optional) [Content-Encoding](https://tools.ietf.org/html/rfc7231#section-3.1.2.2) of the object data.
-
-* `content_language` - (Optional) [Content-Language](https://tools.ietf.org/html/rfc7231#section-3.1.3.2) of the object data.
-
-* `content_type` - (Optional) [Content-Type](https://tools.ietf.org/html/rfc7231#section-3.1.1.5) of the object data. Defaults to "application/octet-stream" or "text/plain; charset=utf-8".
-
-* `predefined_acl` - (Optional, Deprecated) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control#predefined-acl) to apply. Please switch
-to `google_storage_object_acl.predefined_acl`.
-
-* `storage_class` - (Optional) The [StorageClass](https://cloud.google.com/storage/docs/storage-classes) of the new bucket object.
- Supported values include: `MULTI_REGIONAL`, `REGIONAL`, `NEARLINE`, `COLDLINE`. If not provided, this defaults to the bucket's default
- storage class or to a [standard](https://cloud.google.com/storage/docs/storage-classes#standard) class.
-
-## Attributes Reference
-
-In addition to the arguments listed above, the following computed attributes are
-exported:
-
-* `crc32c` - (Computed) Base 64 CRC32 hash of the uploaded data.
-
-* `md5hash` - (Computed) Base 64 MD5 hash of the uploaded data.
diff --git a/website/source/docs/providers/google/r/storage_object_acl.html.markdown b/website/source/docs/providers/google/r/storage_object_acl.html.markdown
deleted file mode 100644
index 01d1f83d8..000000000
--- a/website/source/docs/providers/google/r/storage_object_acl.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "google"
-page_title: "Google: google_storage_object_acl"
-sidebar_current: "docs-google-storage-object-acl"
-description: |-
- Creates a new object ACL in Google Cloud Storage.
----
-
-# google\_storage\_object\_acl
-
-Creates a new object ACL in Google cloud storage service (GCS). For more information see
-[the official documentation](https://cloud.google.com/storage/docs/access-control/lists)
-and
-[API](https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls).
-
-## Example Usage
-
-Create an object ACL with one owner and one reader.
-
-```hcl
-resource "google_storage_bucket" "image-store" {
- name = "image-store-bucket"
- location = "EU"
-}
-
-resource "google_storage_bucket_object" "image" {
- name = "image1"
- bucket = "${google_storage_bucket.image-store.name}"
- source = "image1.jpg"
-}
-
-resource "google_storage_object_acl" "image-store-acl" {
- bucket = "${google_storage_bucket.image-store.name}"
- object = "${google_storage_bucket_object.image.name}"
-
- role_entity = [
- "OWNER:user-my.email@gmail.com",
- "READER:group-mygroup",
- ]
-}
-```
-
-## Argument Reference
-
-* `bucket` - (Required) The name of the bucket it applies to.
-
-* `object` - (Required) The name of the object it applies to.
-
-- - -
-
-* `predefined_acl` - (Optional) The [canned GCS ACL](https://cloud.google.com/storage/docs/access-control#predefined-acl) to apply. Must be set if `role_entity` is not.
-
-* `role_entity` - (Optional) List of role/entity pairs in the form `ROLE:entity`. See [GCS Object ACL documentation](https://cloud.google.com/storage/docs/json_api/v1/objectAccessControls) for more details. Must be set if `predefined_acl` is not.
-
-## Attributes Reference
-
-Only the arguments listed above are exposed as attributes.
diff --git a/website/source/docs/providers/grafana/index.html.markdown b/website/source/docs/providers/grafana/index.html.markdown
deleted file mode 100644
index afecda124..000000000
--- a/website/source/docs/providers/grafana/index.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "grafana"
-page_title: "Provider: Grafana"
-sidebar_current: "docs-grafana-index"
-description: |-
- The Grafana provider configures data sources and dashboards in Grafana.
----
-
-# Grafana Provider
-
-The Grafana provider configures data sources and dashboards in
-[Grafana](http://grafana.org/), which is a web application for creating,
-viewing and sharing metrics dashboards.
-
-The provider configuration block accepts the following arguments:
-
-* ``url`` - (Required) The root URL of a Grafana server. May alternatively be
- set via the ``GRAFANA_URL`` environment variable.
-
-* ``auth`` - (Required) The API token or username/password to use to
- authenticate to the Grafana server. If username/password is used, they
- are provided in a single string and separated by a colon. May alternatively
- be set via the ``GRAFANA_AUTH`` environment variable.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "grafana" {
- url = "http://grafana.example.com/"
- auth = "1234abcd"
-}
-
-resource "grafana_dashboard" "metrics" {
- config_json = "${file("grafana-dashboard.json")}"
-}
-```
diff --git a/website/source/docs/providers/grafana/r/dashboard.html.md b/website/source/docs/providers/grafana/r/dashboard.html.md
deleted file mode 100644
index b37a69e7f..000000000
--- a/website/source/docs/providers/grafana/r/dashboard.html.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "grafana"
-page_title: "Grafana: grafana_dashboard"
-sidebar_current: "docs-influxdb-resource-dashboard"
-description: |-
- The grafana_dashboard resource allows a Grafana dashboard to be created.
----
-
-# grafana\_dashboard
-
-The dashboard resource allows a dashboard to be created on a Grafana server.
-
-## Example Usage
-
-```hcl
-resource "grafana_dashboard" "metrics" {
- config_json = "${file("grafana-dashboard.json")}"
-}
-```
-
-Dashboards can be exported from Grafana's web UI in JSON format and provided
-via the `config_json` argument.
-
-The exported JSON will include references to Grafana data sources, but the
-data source configuration is not exported in this way. In order to fully
-manage a dashboard with Terraform, necessary data sources can be created
-using the `grafana_data_source` resource. In order to ensure that a data
-source is created before a dashboard that refers to it, use the `depends_on`
-meta-parameter:
-
-```hcl
- depends_on = ["grafana_data_source.metrics"]
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `config_json` - (Required) The JSON configuration for the dashboard.
-
-## Attributes Reference
-
-The resource exports the following attributes:
-
-* `slug` - A URL "slug" for this dashboard, generated by Grafana by removing
- certain characters from the dashboard name given as part of the `config_json`
- argument. This can be used to generate the URL for a dashboard.
diff --git a/website/source/docs/providers/grafana/r/data_source.html.md b/website/source/docs/providers/grafana/r/data_source.html.md
deleted file mode 100644
index aa3df4f8c..000000000
--- a/website/source/docs/providers/grafana/r/data_source.html.md
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "grafana"
-page_title: "Grafana: grafana_data_source"
-sidebar_current: "docs-influxdb-resource-data-source"
-description: |-
- The grafana_data_source resource allows a Grafana data source to be created.
----
-
-# grafana\_data\_source
-
-The data source resource allows a data source to be created on a Grafana server.
-
-## Example Usage
-
-The required arguments for this resource vary depending on the type of data
-source selected (via the `type` argument). The following example is for
-InfluxDB. See
-[Grafana's *Data Sources Guides*](http://docs.grafana.org/#data-sources-guides)
-for more details on the supported data source types and the arguments they use.
-
-```hcl
-resource "grafana_data_source" "metrics" {
- type = "influxdb"
- name = "myapp-metrics"
- url = "http://influxdb.example.net:8086/"
- username = "myapp"
- password = "foobarbaz"
- database_name = "${influxdb_database.metrics.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The data source type. Must be one of the data source
- keywords supported by the Grafana server.
-
-* `name` - (Required) A unique name for the data source within the Grafana
- server.
-
-* `url` - (Required) The URL for the data source. The type of URL required
- varies depending on the chosen data source type.
-
-* `is_default` - (Optional) If true, the dashboard will be shown as the
- root page on the Grafana server. Only one dashboard on a server can be
- the default.
-
-* `basic_auth_enabled` - (Optional) - If true, HTTP basic authentication will
- be used to make requests.
-
-* `basic_auth_username` - (Required if `basic_auth_enabled` is true) The
- username to use for basic auth.
-
-* `basic_auth_password` - (Required if `basic_auth_enabled` is true) The
- password to use for basic auth.
-
-* `username` - (Required by some data source types) The username to use to
- authenticate to the data source.
-
-* `password` - (Required by some data source types) The password to use to
- authenticate to the data source.
-
-* `database_name` - (Required by some data source types) The name of the
- database to use on the selected data source server.
-
-* `access_mode` - (Optional) The method by which the browser-based Grafana
- application will access the data source. The default is "proxy", which means
- that the application will make requests via a proxy endpoint on the Grafana
- server.
-
-## Attributes Reference
-
-The resource exports the following attributes:
-
-* `id` - The opaque unique id assigned to the data source by the Grafana
- server.
diff --git a/website/source/docs/providers/heroku/index.html.markdown b/website/source/docs/providers/heroku/index.html.markdown
deleted file mode 100644
index 2bec7aa85..000000000
--- a/website/source/docs/providers/heroku/index.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "heroku"
-page_title: "Provider: Heroku"
-sidebar_current: "docs-heroku-index"
-description: |-
- The Heroku provider is used to interact with the resources supported by Heroku. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Heroku Provider
-
-The Heroku provider is used to interact with the
-resources supported by Heroku. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Heroku provider
-provider "heroku" {
- email = "ops@company.com"
- api_key = "${var.heroku_api_key}"
-}
-
-# Create a new application
-resource "heroku_app" "default" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_key` - (Required) Heroku API token. It must be provided, but it can also
- be sourced from the `HEROKU_API_KEY` environment variable.
-* `email` - (Required) Email to be notified by Heroku. It must be provided, but
- it can also be sourced from the `HEROKU_EMAIL` environment variable.
-
diff --git a/website/source/docs/providers/heroku/r/addon.html.markdown b/website/source/docs/providers/heroku/r/addon.html.markdown
deleted file mode 100644
index 9f6b41cc3..000000000
--- a/website/source/docs/providers/heroku/r/addon.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_addon"
-sidebar_current: "docs-heroku-resource-addon"
-description: |-
- Provides a Heroku Add-On resource. These can be used to attach services to a Heroku app.
----
-
-# heroku\_addon
-
-Provides a Heroku Add-On resource. These can be used to attach
-services to a Heroku app.
-
-## Example Usage
-
-```hcl
-# Create a new Heroku app
-resource "heroku_app" "default" {
- name = "test-app"
-}
-
-# Create a database, and configure the app to use it
-resource "heroku_addon" "database" {
- app = "${heroku_app.default.name}"
- plan = "heroku-postgresql:hobby-basic"
-}
-
-# Add a web-hook addon for the app
-resource "heroku_addon" "webhook" {
- app = "${heroku_app.default.name}"
- plan = "deployhooks:http"
-
- config {
- url = "http://google.com"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `app` - (Required) The Heroku app to add to.
-* `plan` - (Required) The addon to add.
-* `config` - (Optional) Optional plan configuration.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the add-on
-* `name` - The add-on name
-* `plan` - The plan name
-* `provider_id` - The ID of the plan provider
-
-
-## Import
-
-Addons can be imported using the Addon `id`, e.g.
-
-```
-$ terraform import heroku_addon.foobar 12345678
-```
diff --git a/website/source/docs/providers/heroku/r/app.html.markdown b/website/source/docs/providers/heroku/r/app.html.markdown
deleted file mode 100644
index 9407eb779..000000000
--- a/website/source/docs/providers/heroku/r/app.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_app"
-sidebar_current: "docs-heroku-resource-app-x"
-description: |-
- Provides a Heroku App resource. This can be used to create and manage applications on Heroku.
----
-
-# heroku\_app
-
-Provides a Heroku App resource. This can be used to
-create and manage applications on Heroku.
-
-## Example Usage
-
-```hcl
-# Create a new Heroku app
-resource "heroku_app" "default" {
- name = "my-cool-app"
- region = "us"
-
- config_vars {
- FOOBAR = "baz"
- }
-
- buildpacks = [
- "heroku/go"
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the application. In Heroku, this is also the
- unique ID, so it must be unique and have a minimum of 3 characters.
-* `region` - (Required) The region that the app should be deployed in.
-* `stack` - (Optional) The application stack is what platform to run the application
- in.
-* `buildpacks` - (Optional) Buildpack names or URLs for the application.
- Buildpacks configured externally won't be altered if this is not present.
-* `config_vars` - (Optional) Configuration variables for the application.
- The config variables in this map are not the final set of configuration
- variables, but rather variables you want present. That is, other
- configuration variables set externally won't be removed by Terraform
- if they aren't present in this list.
-* `space` - (Optional) The name of a private space to create the app in.
-* `organization` - (Optional) A block that can be specified once to define
- organization settings for this app. The fields for this block are
- documented below.
-
-The `organization` block supports:
-
-* `name` (string) - The name of the organization.
-* `locked` (boolean)
-* `personal` (boolean)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the app. This is also the name of the application.
-* `name` - The name of the application. In Heroku, this is also the
- unique ID.
-* `stack` - The application stack is what platform to run the application
- in.
-* `space` - The private space the app should run in.
-* `region` - The region that the app should be deployed in.
-* `git_url` - The Git URL for the application. This is used for
- deploying new versions of the app.
-* `web_url` - The web (HTTP) URL that the application can be accessed
- at by default.
-* `heroku_hostname` - A hostname for the Heroku application, suitable
- for pointing DNS records.
-* `all_config_vars` - A map of all of the configuration variables that
- exist for the app, containing both those set by Terraform and those
- set externally.
diff --git a/website/source/docs/providers/heroku/r/app_feature.html.markdown b/website/source/docs/providers/heroku/r/app_feature.html.markdown
deleted file mode 100644
index c532b47d7..000000000
--- a/website/source/docs/providers/heroku/r/app_feature.html.markdown
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_app_feature"
-sidebar_current: "docs-heroku-resource-app-feature"
-description: |-
- Provides a Heroku App Feature resource. This can be used to create and manage App Features on Heroku.
----
-
-# heroku\_app\_feature
-
-Provides a Heroku App Feature resource. This can be used to create and manage App Features on Heroku.
-
-## Example Usage
-
-```hcl
-resource "heroku_app_feature" "log_runtime_metrics" {
- app = "test-app"
- name = "log-runtime-metrics"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `app` - (Required) The Heroku app to link to.
-* `name` - (Required) The name of the App Feature to manage.
-* `enabled` - (Optional) Whether to enable or disable the App Feature. The default value is true.
diff --git a/website/source/docs/providers/heroku/r/cert.html.markdown b/website/source/docs/providers/heroku/r/cert.html.markdown
deleted file mode 100644
index 0a2f8a530..000000000
--- a/website/source/docs/providers/heroku/r/cert.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_cert"
-sidebar_current: "docs-heroku-resource-cert"
-description: |-
-  Provides a Heroku SSL certificate resource. It allows you to set a given certificate for a Heroku app.
----
-
-# heroku\_cert
-
-Provides a Heroku SSL certificate resource. It allows you to set a given certificate for a Heroku app.
-
-## Example Usage
-
-```hcl
-# Create a new Heroku app
-resource "heroku_app" "default" {
- name = "test-app"
-}
-
-# Add-on SSL to application
-resource "heroku_addon" "ssl" {
- app = "${heroku_app.default.name}"
- plan = "ssl"
-}
-
-# Establish certificate for a given application
-resource "heroku_cert" "ssl_certificate" {
- app = "${heroku_app.default.name}"
- certificate_chain = "${file("server.crt")}"
- private_key = "${file("server.key")}"
- depends_on = "heroku_addon.ssl"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `app` - (Required) The Heroku app to add to.
-* `certificate_chain` - (Required) The certificate chain to add
-* `private_key` - (Required) The private key for a given certificate chain
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the add-on
-* `cname` - The CNAME for the SSL endpoint
-* `name` - The name of the SSL certificate
-
diff --git a/website/source/docs/providers/heroku/r/domain.html.markdown b/website/source/docs/providers/heroku/r/domain.html.markdown
deleted file mode 100644
index 451c3f411..000000000
--- a/website/source/docs/providers/heroku/r/domain.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_domain"
-sidebar_current: "docs-heroku-resource-domain"
-description: |-
-  Provides a Heroku Domain resource. This can be used to create and manage custom domains on Heroku.
----
-
-# heroku\_domain
-
-Provides a Heroku Domain resource. This can be used to
-create and manage custom domains on Heroku.
-
-## Example Usage
-
-```hcl
-# Create a new Heroku app
-resource "heroku_app" "default" {
- name = "test-app"
-}
-
-# Associate a custom domain
-resource "heroku_domain" "default" {
- app = "${heroku_app.default.name}"
- hostname = "terraform.example.com"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `hostname` - (Required) The hostname to serve requests from.
-* `app` - (Required) The Heroku app to link to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the domain record.
-* `hostname` - The hostname traffic will be served as.
-* `cname` - The CNAME traffic should route to.
-
diff --git a/website/source/docs/providers/heroku/r/drain.html.markdown b/website/source/docs/providers/heroku/r/drain.html.markdown
deleted file mode 100644
index a91fced45..000000000
--- a/website/source/docs/providers/heroku/r/drain.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_drain"
-sidebar_current: "docs-heroku-resource-drain"
-description: |-
- Provides a Heroku Drain resource. This can be used to create and manage Log Drains on Heroku.
----
-
-# heroku\_drain
-
-Provides a Heroku Drain resource. This can be used to
-create and manage Log Drains on Heroku.
-
-## Example Usage
-
-```hcl
-resource "heroku_drain" "default" {
- app = "test-app"
- url = "syslog://terraform.example.com:1234"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `url` - (Required) The URL for Heroku to drain your logs to.
-* `app` - (Required) The Heroku app to link to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `token` - The unique token for your created drain.
-
diff --git a/website/source/docs/providers/heroku/r/pipeline.html.markdown b/website/source/docs/providers/heroku/r/pipeline.html.markdown
deleted file mode 100644
index dcd38ab60..000000000
--- a/website/source/docs/providers/heroku/r/pipeline.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_pipeline_"
-sidebar_current: "docs-heroku-resource-pipeline-x"
-description: |-
- Provides a Heroku Pipeline resource.
----
-
-# heroku\_pipeline
-
-
-Provides a [Heroku Pipeline](https://devcenter.heroku.com/articles/pipelines)
-resource.
-
-A pipeline is a group of Heroku apps that share the same codebase. Once a
-pipeline is created, and apps are added to different stages using
-[`heroku_pipeline_coupling`](./pipeline_coupling.html), you can promote app
-slugs to the next stage.
-
-## Example Usage
-
-```hcl
-# Create Heroku apps for staging and production
-resource "heroku_app" "staging" {
- name = "test-app-staging"
-}
-
-resource "heroku_app" "production" {
- name = "test-app-production"
-}
-
-# Create a Heroku pipeline
-resource "heroku_pipeline" "test-app" {
- name = "test-app"
-}
-
-# Couple apps to different pipeline stages
-resource "heroku_pipeline_coupling" "staging" {
- app = "${heroku_app.staging.name}"
- pipeline = "${heroku_pipeline.test-app.id}"
- stage = "staging"
-}
-
-resource "heroku_pipeline_coupling" "production" {
- app = "${heroku_app.production.name}"
- pipeline = "${heroku_pipeline.test-app.id}"
- stage = "production"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the pipeline.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The UUID of the pipeline.
-* `name` - The name of the pipeline.
diff --git a/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown b/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown
deleted file mode 100644
index 614c582ed..000000000
--- a/website/source/docs/providers/heroku/r/pipeline_coupling.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_pipeline_coupling"
-sidebar_current: "docs-heroku-resource-pipeline-coupling"
-description: |-
- Provides a Heroku Pipeline Coupling resource.
----
-
-# heroku\_pipeline\_coupling
-
-
-Provides a [Heroku Pipeline Coupling](https://devcenter.heroku.com/articles/pipelines)
-resource.
-
-A pipeline is a group of Heroku apps that share the same codebase. Once a
-pipeline is created using [`heroku_pipeline`](./pipeline.html), and apps are added
-to different stages using `heroku_pipeline_coupling`, you can promote app slugs
-to the downstream stages.
-
-## Example Usage
-
-```hcl
-# Create Heroku apps for staging and production
-resource "heroku_app" "staging" {
- name = "test-app-staging"
-}
-
-resource "heroku_app" "production" {
- name = "test-app-production"
-}
-
-# Create a Heroku pipeline
-resource "heroku_pipeline" "test-app" {
- name = "test-app"
-}
-
-# Couple apps to different pipeline stages
-resource "heroku_pipeline_coupling" "staging" {
- app = "${heroku_app.staging.name}"
- pipeline = "${heroku_pipeline.test-app.id}"
- stage = "staging"
-}
-
-resource "heroku_pipeline_coupling" "production" {
- app = "${heroku_app.production.name}"
- pipeline = "${heroku_pipeline.test-app.id}"
- stage = "production"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `app` - (Required) The name of the app for this coupling.
-* `pipeline` - (Required) The ID of the pipeline to add this app to.
-* `stage` - (Required) The stage to couple this app to. Must be one of
-`review`, `development`, `staging`, or `production`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The UUID of this pipeline coupling.
-* `app` - The name of the application.
-* `pipeline` - The UUID of the pipeline.
-* `stage` - The stage for this coupling.
diff --git a/website/source/docs/providers/heroku/r/space.html.markdown b/website/source/docs/providers/heroku/r/space.html.markdown
deleted file mode 100644
index 089fe2159..000000000
--- a/website/source/docs/providers/heroku/r/space.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "heroku"
-page_title: "Heroku: heroku_space"
-sidebar_current: "docs-heroku-resource-space"
-description: |-
- Provides a Heroku Space resource for running apps in isolated, highly available, secure app execution environments.
----
-
-# heroku\_space
-
-Provides a Heroku Space resource for running apps in isolated, highly available, secure app execution environments.
-
-## Example Usage
-
-```hcl
-// Create a new Heroku space
-resource "heroku_space" "default" {
- name = "test-space"
- organization = "my-company"
- region = "virginia"
-}
-
-// Create a new Heroku app in test-space
-resource "heroku_app" "default" {
- name = "test-app"
- space = "${heroku_space.default.name}"
- organization = {
- name = "my-company"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the space.
-* `organization` - (Required) The name of the organization which will own the space.
-* `region` - (Optional) The region that the space should be created in.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the space.
-* `name` - The space's name.
-* `organization` - The space's organization.
-* `region` - The space's region.
diff --git a/website/source/docs/providers/http/data_source.html.md b/website/source/docs/providers/http/data_source.html.md
deleted file mode 100644
index a833959a4..000000000
--- a/website/source/docs/providers/http/data_source.html.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "http"
-page_title: "HTTP Data Source"
-sidebar_current: "docs-http-data-source"
-description: |-
- Retrieves the content at an HTTP or HTTPS URL.
----
-
-# `http` Data Source
-
-The `http` data source makes an HTTP GET request to the given URL and exports
-information about the response.
-
-The given URL may be either an `http` or `https` URL. At present this resource
-can only retrieve data from URLs that respond with `text/*` or
-`application/json` content types, and expects the result to be UTF-8 encoded
-regardless of the returned content type header.
-
-~> **Important** Although `https` URLs can be used, there is currently no
-mechanism to authenticate the remote server except for general verification of
-the server certificate's chain of trust. Data retrieved from servers not under
-your control should be treated as untrustworthy.
-
-## Example Usage
-
-```hcl
-data "http" "example" {
- url = "https://checkpoint-api.hashicorp.com/v1/check/terraform"
-
- # Optional request headers
- request_headers {
- "Accept" = "application/json"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `url` - (Required) The URL to request data from. This URL must respond with
- a `200 OK` response and a `text/*` or `application/json` Content-Type.
-
-* `request_headers` - (Optional) A map of strings representing additional HTTP
- headers to include in the request.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `body` - The raw body of the HTTP response.
diff --git a/website/source/docs/providers/http/index.html.markdown b/website/source/docs/providers/http/index.html.markdown
deleted file mode 100644
index c6d37f46c..000000000
--- a/website/source/docs/providers/http/index.html.markdown
+++ /dev/null
@@ -1,15 +0,0 @@
----
-layout: "http"
-page_title: "Provider: HTTP"
-sidebar_current: "docs-http-index"
-description: |-
- The HTTP provider interacts with HTTP servers.
----
-
-# HTTP Provider
-
-The HTTP provider is a utility provider for interacting with generic HTTP
-servers as part of a Terraform configuration.
-
-This provider requires no configuration. For information on the resources
-it provides, see the navigation bar.
diff --git a/website/source/docs/providers/icinga2/index.html.markdown b/website/source/docs/providers/icinga2/index.html.markdown
deleted file mode 100644
index 9c8850dee..000000000
--- a/website/source/docs/providers/icinga2/index.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "icinga2"
-page_title: "Provider: Icinga2"
-sidebar_current: "docs-icinga2-index"
-description: |-
- The Icinga2 provider is used to configure hosts to be monitored by Icinga2 servers. The provider needs to be configured with the API URL of the Icinga2 server and credentials for an API user with the appropriate permissions.
----
-
-
-# Icinga2 Provider
-
-The Icinga2 provider is used to configure hosts to be monitored by
-[Icinga2](https://www.icinga.com/products/icinga-2/) servers. The provider
-needs to be configured with the API URL of the Icinga2 server and credentials
-for an API user with the appropriate permissions.
-
-## Example Usage
-
-```hcl
-# Configure the Icinga2 provider
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
- api_user = "root"
- api_password = "icinga"
- insecure_skip_tls_verify = true
-}
-
-# Configure a host
-resource "icinga2_host" "web-server" {
- # ...
-}
-```
-
-## Authentication
-
-### Static credentials ###
-
-Static credentials can be provided by adding an `api_user` and `api_password` in-line in the
-icinga2 provider block:
-
-Usage:
-
-```hcl
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
- api_user = "root"
- api_password = "icinga"
-}
-```
-
-
-### Environment variables
-
-You can provide your credentials via `ICINGA2_API_USER` and `ICINGA2_API_PASSWORD`,
-environment variables, storing your Icinga2 API user and password, respectively.
-`ICINGA2_API_URL` and `ICINGA2_INSECURE_SKIP_TLS_VERIFY` are also used, if applicable:
-
-```hcl
-provider "icinga2" {}
-```
-
-Usage:
-
-```shell
-$ export ICINGA2_API_URL=https://192.168.33.5:5665/v1
-$ export ICINGA2_API_USER=root
-$ export ICINGA2_API_PASSWORD=icinga
-$ export ICINGA2_INSECURE_SKIP_TLS_VERIFY=true
-$ terraform plan
-```
-
-## Argument Reference
-
-* ``api_url`` - (Required) The root API URL of an Icinga2 server. May alternatively be
- set via the ``ICINGA2_API_URL`` environment variable.
-
-* ``api_user`` - (Required) The API username to use to
- authenticate to the Icinga2 server. May alternatively
- be set via the ``ICINGA2_API_USER`` environment variable.
-
-* ``api_password`` - (Required) The password to use to
- authenticate to the Icinga2 server. May alternatively
- be set via the ``ICINGA2_API_PASSWORD`` environment variable.
-
-* ``insecure_skip_tls_verify`` - (Optional) Defaults to false. If set to true,
- verification of the Icinga2 server's SSL certificate is disabled. This is a security
- risk and should be avoided. May alternatively be set via the
- ``ICINGA2_INSECURE_SKIP_TLS_VERIFY`` environment variable.
diff --git a/website/source/docs/providers/icinga2/r/checkcommands.html.markdown b/website/source/docs/providers/icinga2/r/checkcommands.html.markdown
deleted file mode 100644
index 1055bb6eb..000000000
--- a/website/source/docs/providers/icinga2/r/checkcommands.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "icinga2"
-page_title: "Icinga2: host"
-sidebar_current: "docs-icinga2-resource-checkcommand"
-description: |-
- Configures a checkcommand resource. This allows checkcommands to be configured, updated and deleted.
----
-
-# icinga2\_checkcommand
-
-Configures an Icinga2 checkcommand resource. This allows checkcommands to be configured, updated,
-and deleted.
-
-## Example Usage
-
-```hcl
-# Configure a new checkcommand on an Icinga2 Server, that can be used to monitor hosts and/or services
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
-}
-
-resource "icinga2_checkcommand" "apache_status" {
- name = "apache_status"
- templates = ["apache-status", "plugin-check-command", "plugin-check-command", "ipv4-or-ipv6"]
- command = "/usr/lib64/nagios/plugins/check_apache_status.pl"
-
- arguments = {
- "-H" = "$apache_status_address$"
- "-c" = "$apache_status_critical$"
- "-p" = "$apache_status_port$"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `arguments` - (Optional) A mapping of arguments to include with the command.
-* `command` - (Required) Path to the command to be executed.
-* `name` - (Required) Name by which to reference the checkcommand
-* `templates` - (Optional) A list of Icinga2 templates to assign to the host.
diff --git a/website/source/docs/providers/icinga2/r/host.html.markdown b/website/source/docs/providers/icinga2/r/host.html.markdown
deleted file mode 100644
index 1cdc7c103..000000000
--- a/website/source/docs/providers/icinga2/r/host.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "icinga2"
-page_title: "Icinga2: host"
-sidebar_current: "docs-icinga2-resource-host"
-description: |-
- Configures a host resource. This allows hosts to be configured, updated and deleted.
----
-
-# icinga2\_host
-
-Configures an Icinga2 host resource. This allows hosts to be configured, updated,
-and deleted.
-
-## Example Usage
-
-```hcl
-# Configure a new host to be monitored by an Icinga2 Server
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
-}
-
-resource "icinga2_host" "host" {
- hostname = "terraform-host-1"
- address = "10.10.10.1"
- check_command = "hostalive"
- templates = ["bp-host-web"]
-
- vars {
- os = "linux"
- osver = "1"
- allowance = "none"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Required) The address of the host.
-* `check_command` - (Required) The name of an existing Icinga2 CheckCommand object that is used to determine if the host is available or not.
-* `hostname` - (Required) The hostname of the host.
-* `templates` - (Optional) A list of Icinga2 templates to assign to the host.
-* `vars` - (Optional) A mapping of variables to assign to the host.
-
diff --git a/website/source/docs/providers/icinga2/r/hostgroup.html.markdown b/website/source/docs/providers/icinga2/r/hostgroup.html.markdown
deleted file mode 100644
index ce9578f7e..000000000
--- a/website/source/docs/providers/icinga2/r/hostgroup.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "icinga2"
-page_title: "Icinga2: hostgroup"
-sidebar_current: "docs-icinga2-resource-hostgroup"
-description: |-
- Configures a hostgroup resource. This allows hostgroup to be configured, updated and deleted.
----
-
-# icinga2\_hostgroup
-
-Configures an Icinga2 hostgroup resource. This allows hostgroup to be configured, updated,
-and deleted.
-
-## Example Usage
-
-```hcl
-# Configure a new hostgroup to be monitored by an Icinga2 Server
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
-}
-
-resource "icinga2_hostgroup" "my-hostgroup" {
- name = "terraform-hostgroup-1"
- display_name = "Terraform Test HostGroup"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the hostgroup.
-* `display_name` - (Required) The name of the hostgroup to display in the Icinga2 interface.
-
diff --git a/website/source/docs/providers/icinga2/r/service.html.markdown b/website/source/docs/providers/icinga2/r/service.html.markdown
deleted file mode 100644
index 968d8cef0..000000000
--- a/website/source/docs/providers/icinga2/r/service.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "icinga2"
-page_title: "Icinga2: service"
-sidebar_current: "docs-icinga2-resource-service"
-description: |-
- Configures a service resource. This allows service to be configured, updated and deleted.
----
-
-# icinga2\_service
-
-Configures an Icinga2 service resource. This allows service to be configured, updated,
-and deleted.
-
-## Example Usage
-
-```hcl
-# Configure a new service to be monitored by an Icinga2 Server
-provider "icinga2" {
- api_url = "https://192.168.33.5:5665/v1"
-}
-
-resource "icinga2_service" "my-service" {
- hostname = "c1-mysql-1"
- servicename = "ssh"
- check_command = "ssh"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `check_command` - (Required) The name of an existing Icinga2 CheckCommand object that is used to determine if the service is available on the host.
-* `hostname` - (Required) The host to check the service's status on
-* `servicename` - (Required) The name of the service.
-
diff --git a/website/source/docs/providers/ignition/d/config.html.md b/website/source/docs/providers/ignition/d/config.html.md
deleted file mode 100644
index a5ac3a755..000000000
--- a/website/source/docs/providers/ignition/d/config.html.md
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_config"
-sidebar_current: "docs-ignition-datasource-config"
-description: |-
- Renders an ignition configuration as JSON
----
-
-# ignition\_config
-
-Renders an ignition configuration as JSON. It contains all the disks, partitions, arrays, filesystems, files, users, groups and units.
-
-## Example Usage
-
-```hcl
-data "ignition_config" "example" {
- systemd = [
- "${data.ignition_systemd_unit.example.id}",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `disks` - (Optional) The list of disks to be configured and their options.
-
-* `arrays` - (Optional) The list of RAID arrays to be configured.
-
-* `filesystems` - (Optional) The list of filesystems to be configured and/or used in the _ignition_file_ resource.
-
-* `files` - (Optional) The list of files, rooted in this particular filesystem, to be written.
-
-* `systemd` - (Optional) The list of systemd units. Describes the desired state of the systemd units.
-
-* `networkd` - (Optional) The list of networkd units. Describes the desired state of the networkd files.
-
-* `users` - (Optional) The list of accounts to be added.
-
-* `groups` - (Optional) The list of groups to be added.
-
-* `append` - (Optional) Any number of blocks with the configs to be appended to the current config.
-
-* `replace` - (Optional) A block with config that will replace the current.
-
-
-The `append` and `replace` blocks supports:
-
-* `source` - (Required) The URL of the config. Supported schemes are http. Note: When using http, it is advisable to use the verification option to ensure the contents haven’t been modified.
-
-* `verification` - (Optional) The hash of the config, in the form _\<type\>-\<value\>_ where type is sha512.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `rendered` - The final rendered template.
diff --git a/website/source/docs/providers/ignition/d/disk.html.md b/website/source/docs/providers/ignition/d/disk.html.md
deleted file mode 100644
index c061ceeb0..000000000
--- a/website/source/docs/providers/ignition/d/disk.html.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_disk"
-sidebar_current: "docs-ignition-datasource-disk"
-description: |-
- Describes the desired state of a system’s disk.
----
-
-# ignition\_disk
-
-Describes the desired state of a system’s disk.
-
-## Example Usage
-
-```hcl
-data "ignition_disk" "foo" {
- device = "/dev/sda"
- partition {
- start = 2048
- size = 196037632
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `device` - (Required) The absolute path to the device. Devices are typically referenced by the _/dev/disk/by-*_ symlinks.
-
-* `wipe_table` - (Optional) Whether or not the partition tables shall be wiped. When true, the partition tables are erased before any further manipulation. Otherwise, the existing entries are left intact.
-
-* `partition` - (Optional) The list of partitions and their configuration for this particular disk.
-
-
-The `partition` block supports:
-
-* `label` - (Optional) The PARTLABEL for the partition.
-
-* `number` - (Optional) The partition number, which dictates it’s position in the partition table (one-indexed). If zero, use the next available partition slot.
-
-* `size` - (Optional) The size of the partition (in sectors). If zero, the partition will fill the remainder of the disk.
-
-
-* `start` - (Optional) The start of the partition (in sectors). If zero, the partition will be positioned at the earliest available part of the disk.
-
-
-* `type_guid` - (Optional) The GPT [partition type GUID](http://en.wikipedia.org/wiki/GUID_Partition_Table#Partition_type_GUIDs). If omitted, the default will be _0FC63DAF-8483-4772-8E79-3D69D8477DE4_ (Linux filesystem data).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/file.html.md b/website/source/docs/providers/ignition/d/file.html.md
deleted file mode 100644
index b851bbbcd..000000000
--- a/website/source/docs/providers/ignition/d/file.html.md
+++ /dev/null
@@ -1,80 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_file"
-sidebar_current: "docs-ignition-datasource-file"
-description: |-
- Describes a file to be written in a particular filesystem.
----
-
-# ignition\_file
-
-Describes a file to be written in a particular filesystem.
-
-## Example Usage
-
-File with inline content:
-
-```hcl
-data "ignition_file" "hello" {
- filesystem = "foo"
- path = "/hello.txt"
- content {
- content = "Hello World!"
- }
-}
-```
-
-File with remote content:
-
-```hcl
-data "ignition_file" "hello" {
- filesystem = "qux"
- path = "/hello.txt"
- source {
- source = "http://example.com/hello.txt.gz"
- compression = "gzip"
- verification = "sha512-0123456789abcdef0123456789...456789abcdef"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `filesystem` - (Optional) The internal identifier of the filesystem. This matches the last filesystem with the given identifier. This should be a valid name from a _ignition\_filesystem_ resource.
-
-* `path` - (Optional) The absolute path to the file.
-
-* `content` - (Optional) Block to provide the file content inline.
-
-* `source` - (Optional) Block to retrieve the file content from a remote location.
-
- __Note__: `content` and `source` are mutually exclusive
-
-* `mode` - (Optional) The file's permission mode.
-
-* `uid` - (Optional) The user ID of the owner.
-
-* `gid` - (Optional) The group ID of the owner.
-
-The `content` block supports:
-
-* `mime` - (Required) MIME format of the content (default _text/plain_).
-
-* `content` - (Required) Content of the file.
-
-The `source` block supports:
-
-* `source` - (Required) The URL of the file contents. When using http, it is advisable to use the verification option to ensure the contents haven’t been modified.
-
-* `compression` - (Optional) The type of compression used on the contents (null or gzip).
-
-* `verification` - (Optional) The hash of the config, in the form _\<type\>-\<value\>_ where type is sha512.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/filesystem.html.md b/website/source/docs/providers/ignition/d/filesystem.html.md
deleted file mode 100644
index 68e66f207..000000000
--- a/website/source/docs/providers/ignition/d/filesystem.html.md
+++ /dev/null
@@ -1,54 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_filesystem"
-sidebar_current: "docs-ignition-datasource-filesystem"
-description: |-
- Describes the desired state of a system’s filesystem.
----
-
-# ignition\_filesystem
-
-Describes the desired state of the system’s filesystems to be configured and/or used with the _ignition\_file_ resource.
-
-## Example Usage
-
-```hcl
-data "ignition_filesystem" "foo" {
- name = "root"
- mount {
- device = "/dev/disk/by-label/ROOT"
- format = "xfs"
- create = true
- options = ["-L", "ROOT"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The identifier for the filesystem, internal to Ignition. This is only required if the filesystem needs to be referenced in an _ignition\_file_ resource.
-
-* `mount` - (Optional) Contains the set of mount and formatting options for the filesystem. A non-null entry indicates that the filesystem should be mounted before it is used by Ignition.
-
-* `path` - (Optional) The mount-point of the filesystem. A non-null entry indicates that the filesystem has already been mounted by the system at the specified path. This is really only useful for _/sysroot_.
-
-
-The `mount` block supports:
-
-* `device` - (Required) The absolute path to the device. Devices are typically referenced by the _/dev/disk/by-*_ symlinks.
-
-* `format` - (Required) The filesystem format (ext4, btrfs, or xfs).
-
-* `create` - (Optional) Indicates if the filesystem shall be created.
-
-* `force` - (Optional) Whether or not the create operation shall overwrite an existing filesystem. Only allowed if the filesystem is being created.
-
-* `options` - (Optional) Any additional options to be passed to the format-specific mkfs utility. Only allowed if the filesystem is being created
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/group.html.md b/website/source/docs/providers/ignition/d/group.html.md
deleted file mode 100644
index 807e72dd6..000000000
--- a/website/source/docs/providers/ignition/d/group.html.md
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_group"
-sidebar_current: "docs-ignition-datasource-group"
-description: |-
- Describes the desired group additions to the passwd database.
----
-
-# ignition\_group
-
-Describes the desired group additions to the passwd database.
-
-## Example Usage
-
-```hcl
-data "ignition_group" "foo" {
- name = "foo"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The groupname for the account.
-
-* `password_hash` - (Optional) The encrypted password for the account.
-
-* `gid` - (Optional) The group ID of the new account.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/networkd_unit.html.md b/website/source/docs/providers/ignition/d/networkd_unit.html.md
deleted file mode 100644
index bf45e345b..000000000
--- a/website/source/docs/providers/ignition/d/networkd_unit.html.md
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_networkd_unit"
-sidebar_current: "docs-ignition-datasource-networkd-unit"
-description: |-
- Describes the desired state of the networkd units.
----
-
-# ignition\_networkd\_unit
-
-Describes the desired state of the networkd units.
-
-## Example Usage
-
-```hcl
-data "ignition_networkd_unit" "example" {
- name = "00-eth0.network"
- content = "[Match]\nName=eth0\n\n[Network]\nAddress=10.0.1.7"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the file. This must be suffixed with a valid unit type (e.g. _00-eth0.network_).
-
-* `content` - (Required) The contents of the networkd file.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/raid.html.md b/website/source/docs/providers/ignition/d/raid.html.md
deleted file mode 100644
index 47219a251..000000000
--- a/website/source/docs/providers/ignition/d/raid.html.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_raid"
-sidebar_current: "docs-ignition-datasource-raid"
-description: |-
- Describes the desired state of the system’s RAID.
----
-
-# ignition\_raid
-
-Describes the desired state of the system’s RAID.
-
-## Example Usage
-
-```hcl
-data "ignition_raid" "md" {
- name = "data"
- level = "stripe"
- devices = [
- "/dev/disk/by-partlabel/raid.1.1",
- "/dev/disk/by-partlabel/raid.1.2"
- ]
-}
-
-data "ignition_disk" "disk1" {
- device = "/dev/sdb"
- wipe_table = true
- partition {
- label = "raid.1.1"
- number = 1
- size = 20480
- start = 0
- }
-}
-
-data "ignition_disk" "disk2" {
- device = "/dev/sdc"
- wipe_table = true
- partition {
- label = "raid.1.2"
- number = 1
- size = 20480
- start = 0
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name to use for the resulting md device.
-
-* `level` - (Required) The redundancy level of the array (e.g. linear, raid1, raid5, etc.).
-
-* `devices` - (Required) The list of devices (referenced by their absolute path) in the array.
-
-* `spares` - (Optional) The number of spares (if applicable) in the array.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/systemd_unit.html.md b/website/source/docs/providers/ignition/d/systemd_unit.html.md
deleted file mode 100644
index a97e32147..000000000
--- a/website/source/docs/providers/ignition/d/systemd_unit.html.md
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_systemd_unit"
-sidebar_current: "docs-ignition-datasource-systemd-unit"
-description: |-
- Describes the desired state of the systemd units.
----
-
-# ignition\_systemd\_unit
-
-Describes the desired state of the systemd units.
-
-## Example Usage
-
-```hcl
-data "ignition_systemd_unit" "example" {
- name = "example.service"
- content = "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the unit. This must be suffixed with a valid unit type (e.g. _thing.service_).
-
-* `enable` - (Optional) Whether or not the service shall be enabled. When true, the service is enabled. In order for this to have any effect, the unit must have an install section. (default true)
-
-* `mask` - (Optional) Whether or not the service shall be masked. When true, the service is masked by symlinking it to _/dev/null_.
-
-* `content` - (Required) The contents of the unit. Optional when a dropin is provided.
-
-* `dropin` - (Optional) The list of drop-ins for the unit.
-
-The `dropin` block supports:
-
-* `name` - (Required) The name of the drop-in. This must be suffixed with _.conf_.
-
-* `content` - (Optional) The contents of the drop-in.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/d/user.html.md b/website/source/docs/providers/ignition/d/user.html.md
deleted file mode 100644
index c4975a739..000000000
--- a/website/source/docs/providers/ignition/d/user.html.md
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "ignition"
-page_title: "Ignition: ignition_user"
-sidebar_current: "docs-ignition-datasource-user"
-description: |-
- Describes the desired user additions to the passwd database.
----
-
-# ignition\_user
-
-Describes the desired user additions to the passwd database.
-
-## Example Usage
-
-```hcl
-data "ignition_user" "foo" {
- name = "foo"
- home_dir = "/home/foo/"
- shell = "/bin/bash"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The username for the account.
-
-* `password_hash` - (Optional) The encrypted password for the account.
-
-* `ssh_authorized_keys` - (Optional) A list of SSH keys to be added to the user’s authorized_keys.
-
-* `uid` - (Optional) The user ID of the new account.
-
-* `gecos` - (Optional) The GECOS field of the new account.
-
-* `home_dir` - (Optional) The home directory of the new account.
-
-* `no_create_home` - (Optional) Whether or not to create the user’s home directory.
-
-* `primary_group` - (Optional) The name or ID of the primary group of the new account.
-
-* `groups` - (Optional) The list of supplementary groups of the new account.
-
-* `no_user_group` - (Optional) Whether or not to create a group with the same name as the user.
-
-* `no_log_init` - (Optional) Whether or not to add the user to the lastlog and faillog databases.
-
-* `shell` - (Optional) The login shell of the new account.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID used to reference this resource in _ignition_config_.
\ No newline at end of file
diff --git a/website/source/docs/providers/ignition/index.html.markdown b/website/source/docs/providers/ignition/index.html.markdown
deleted file mode 100644
index 25d067470..000000000
--- a/website/source/docs/providers/ignition/index.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "ignition"
-page_title: "Provider: Ignition"
-sidebar_current: "docs-ignition-index"
-description: |-
- The Ignition provider is used to generate Ignition configuration files used by CoreOS Linux.
----
-
-# Ignition Provider
-
-The Ignition provider is used to generate [Ignition](https://coreos.com/ignition/docs/latest/) configuration files. _Ignition_ is the provisioning utility used by [CoreOS](https://coreos.com/) Linux.
-
-The ignition provider is what we call a _logical provider_ and doesn't manage any _physical_ resources. It generates configuration files to be used by other resources.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-This config will write a single service unit (shown below) with the contents of an example service. This unit will be enabled as a dependency of multi-user.target and therefore start on boot.
-
-```hcl
-# Systemd unit data resource containing the unit definition
-data "ignition_systemd_unit" "example" {
- name = "example.service"
- content = "[Service]\nType=oneshot\nExecStart=/usr/bin/echo Hello World\n\n[Install]\nWantedBy=multi-user.target"
-}
-
-# Ignition config including the previously defined systemd unit data resource
-data "ignition_config" "example" {
- systemd = [
- "${data.ignition_systemd_unit.example.id}",
- ]
-}
-
-# Create a CoreOS server using the Ignition config.
-resource "aws_instance" "web" {
- # ...
-
- user_data = "${data.ignition_config.example.rendered}"
-}
-```
diff --git a/website/source/docs/providers/influxdb/index.html.markdown b/website/source/docs/providers/influxdb/index.html.markdown
deleted file mode 100644
index 4b500ac02..000000000
--- a/website/source/docs/providers/influxdb/index.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "influxdb"
-page_title: "Provider: InfluxDB"
-sidebar_current: "docs-influxdb-index"
-description: |-
- The InfluxDB provider configures databases, etc on an InfluxDB server.
----
-
-# InfluxDB Provider
-
-The InfluxDB provider allows Terraform to create Databases in
-[InfluxDB](https://influxdb.com/). InfluxDB is a database server optimized
-for time-series data.
-
-The provider configuration block accepts the following arguments:
-
-* ``url`` - (Optional) The root URL of an InfluxDB server. May alternatively be
- set via the ``INFLUXDB_URL`` environment variable. Defaults to
- `http://localhost:8086/`.
-
-* ``username`` - (Optional) The name of the user to use when making requests.
- May alternatively be set via the ``INFLUXDB_USERNAME`` environment variable.
-
-* ``password`` - (Optional) The password to use when making requests.
- May alternatively be set via the ``INFLUXDB_PASSWORD`` environment variable.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "influxdb" {
- url = "http://influxdb.example.com/"
- username = "terraform"
-}
-
-resource "influxdb_database" "metrics" {
- name = "awesome_app"
-}
-
-resource "influxdb_continuous_query" "minnie" {
- name = "minnie"
- database = "${influxdb_database.metrics.name}"
- query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)"
-}
-
-resource "influxdb_user" "paul" {
- name = "paul"
- password = "super-secret"
-}
-```
diff --git a/website/source/docs/providers/influxdb/r/continuous_query.html.md b/website/source/docs/providers/influxdb/r/continuous_query.html.md
deleted file mode 100644
index 7e8b596fc..000000000
--- a/website/source/docs/providers/influxdb/r/continuous_query.html.md
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "influxdb"
-page_title: "InfluxDB: influxdb_continuous_query"
-sidebar_current: "docs-influxdb-resource-continuous_query"
-description: |-
- The influxdb_continuous_query resource allows an InfluxDB continuous query to be managed.
----
-
-# influxdb\_continuous\_query
-
-The continuous_query resource allows a continuous query to be created on an InfluxDB server.
-
-## Example Usage
-
-```hcl
-resource "influxdb_database" "test" {
- name = "terraform-test"
-}
-
-resource "influxdb_continuous_query" "minnie" {
- name = "minnie"
- database = "${influxdb_database.test.name}"
- query = "SELECT min(mouse) INTO min_mouse FROM zoo GROUP BY time(30m)"
-}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name for the continuous_query. This must be unique on the InfluxDB server.
-* `database` - (Required) The database for the continuous_query. This must be an existing influxdb database.
-* `query` - (Required) The query for the continuous_query.
-
-## Attributes Reference
-
-This resource exports no further attributes.
diff --git a/website/source/docs/providers/influxdb/r/database.html.md b/website/source/docs/providers/influxdb/r/database.html.md
deleted file mode 100644
index 0e0cf4933..000000000
--- a/website/source/docs/providers/influxdb/r/database.html.md
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: "influxdb"
-page_title: "InfluxDB: influxdb_database"
-sidebar_current: "docs-influxdb-resource-database"
-description: |-
- The influxdb_database resource allows an InfluxDB database to be created.
----
-
-# influxdb\_database
-
-The database resource allows a database to be created on an InfluxDB server.
-
-## Example Usage
-
-```hcl
-resource "influxdb_database" "metrics" {
- name = "awesome_app"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name for the database. This must be unique on the
- InfluxDB server.
-
-## Attributes Reference
-
-This resource exports no further attributes.
diff --git a/website/source/docs/providers/influxdb/r/user.html.md b/website/source/docs/providers/influxdb/r/user.html.md
deleted file mode 100644
index a1911b5e9..000000000
--- a/website/source/docs/providers/influxdb/r/user.html.md
+++ /dev/null
@@ -1,47 +0,0 @@
----
-layout: "influxdb"
-page_title: "InfluxDB: influxdb_user"
-sidebar_current: "docs-influxdb-resource-user"
-description: |-
-  The influxdb_user resource allows InfluxDB users to be managed.
----
-
-# influxdb\_user
-
-The user resource allows a user to be created on an InfluxDB server.
-
-## Example Usage
-
-```hcl
-resource "influxdb_database" "green" {
- name = "terraform-green"
-}
-
-resource "influxdb_user" "paul" {
- name = "paul"
- password = "super-secret"
-
- grant {
- database = "${influxdb_database.green.name}"
- privilege = "write"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name for the user.
-* `password` - (Required) The password for the user.
-* `admin` - (Optional) Mark the user as admin.
-* `grant` - (Optional) A list of grants for non-admin users
-
-Each `grant` supports the following:
-
-* `database` - (Required) The name of the database the privilege is associated with
-* `privilege` - (Required) The privilege to grant (READ|WRITE|ALL)
-
-## Attributes Reference
-
-* `admin` - (Bool) indication if the user is an admin or not.
diff --git a/website/source/docs/providers/kubernetes/index.html.markdown b/website/source/docs/providers/kubernetes/index.html.markdown
deleted file mode 100644
index 3a8820304..000000000
--- a/website/source/docs/providers/kubernetes/index.html.markdown
+++ /dev/null
@@ -1,99 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Provider: Kubernetes"
-sidebar_current: "docs-kubernetes-index"
-description: |-
- The Kubernetes (K8s) provider is used to interact with the resources supported by Kubernetes. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Kubernetes Provider
-
-The Kubernetes (K8S) provider is used to interact with the resources supported by Kubernetes. The provider needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
--> **Note:** The Kubernetes provider is new as of Terraform 0.9. It is ready to be used but many features are still being added. If there is a Kubernetes feature missing, please report it in the GitHub repo.
-
-## Example Usage
-
-```hcl
-provider "kubernetes" {
- config_context_auth_info = "ops"
- config_context_cluster = "mycluster"
-}
-
-resource "kubernetes_namespace" "example" {
- metadata {
- name = "my-first-namespace"
- }
-}
-```
-
-## Kubernetes versions
-
-Both backward and forward compatibility with Kubernetes API is mostly defined
-by the [official K8S Go library](https://github.com/kubernetes/kubernetes) which we ship with Terraform.
-Below are versions of the library bundled with given versions of Terraform.
-
-* Terraform `<= 0.9.6` - Kubernetes `1.5.4`
-* Terraform `0.9.7+` - Kubernetes `1.6.1`
-
-## Authentication
-
-There are generally two ways to configure the Kubernetes provider.
-
-### File config
-
-The provider always first tries to load **a config file** from a given
-(or default) location. Depending on whether you have current context set
-this _may_ require `config_context_auth_info` and/or `config_context_cluster`
-and/or `config_context`.
-
-#### Setting default config context
-
-Here's an example for how to set default context and avoid all provider configuration:
-
-```
-kubectl config set-context default-system \
- --cluster=chosen-cluster \
- --user=chosen-user
-
-kubectl config use-context default-system
-```
-
-Read [more about `kubectl` in the official docs](https://kubernetes.io/docs/user-guide/kubectl-overview/).
-
-### Statically defined credentials
-
-The other way is to **statically** define all the credentials:
-
-```hcl
-provider "kubernetes" {
- host = "https://104.196.242.174"
- username = "ClusterMaster"
- password = "MindTheGap"
-
- client_certificate = "${file("~/.kube/client-cert.pem")}"
- client_key = "${file("~/.kube/client-key.pem")}"
- cluster_ca_certificate = "${file("~/.kube/cluster-ca-cert.pem")}"
-}
-```
-
-If you have **both** valid configuration in a config file and static configuration, the static one is used as override.
-i.e. any static field will override its counterpart loaded from the config.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `host` - (Optional) The hostname (in form of URI) of Kubernetes master. Can be sourced from `KUBE_HOST`. Defaults to `https://localhost`.
-* `username` - (Optional) The username to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Can be sourced from `KUBE_USER`.
-* `password` - (Optional) The password to use for HTTP basic authentication when accessing the Kubernetes master endpoint. Can be sourced from `KUBE_PASSWORD`.
-* `insecure`- (Optional) Whether server should be accessed without verifying the TLS certificate. Can be sourced from `KUBE_INSECURE`. Defaults to `false`.
-* `client_certificate` - (Optional) PEM-encoded client certificate for TLS authentication. Can be sourced from `KUBE_CLIENT_CERT_DATA`.
-* `client_key` - (Optional) PEM-encoded client certificate key for TLS authentication. Can be sourced from `KUBE_CLIENT_KEY_DATA`.
-* `cluster_ca_certificate` - (Optional) PEM-encoded root certificates bundle for TLS authentication. Can be sourced from `KUBE_CLUSTER_CA_CERT_DATA`.
-* `config_path` - (Optional) Path to the kube config file. Can be sourced from `KUBE_CONFIG` or `KUBECONFIG`. Defaults to `~/.kube/config`.
-* `config_context` - (Optional) Context to choose from the config file. Can be sourced from `KUBE_CTX`.
-* `config_context_auth_info` - (Optional) Authentication info context of the kube config (name of the kubeconfig user, `--user` flag in `kubectl`). Can be sourced from `KUBE_CTX_AUTH_INFO`.
-* `config_context_cluster` - (Optional) Cluster context of the kube config (name of the kubeconfig cluster, `--cluster` flag in `kubectl`). Can be sourced from `KUBE_CTX_CLUSTER`.
diff --git a/website/source/docs/providers/kubernetes/r/config_map.html.markdown b/website/source/docs/providers/kubernetes/r/config_map.html.markdown
deleted file mode 100644
index dd324078d..000000000
--- a/website/source/docs/providers/kubernetes/r/config_map.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_config_map"
-sidebar_current: "docs-kubernetes-resource-config-map"
-description: |-
- The resource provides mechanisms to inject containers with configuration data while keeping containers agnostic of Kubernetes.
----
-
-# kubernetes_config_map
-
-The resource provides mechanisms to inject containers with configuration data while keeping containers agnostic of Kubernetes.
-Config Map can be used to store fine-grained information like individual properties or coarse-grained information like entire config files or JSON blobs.
-
-## Example Usage
-
-```hcl
-resource "kubernetes_config_map" "example" {
- metadata {
- name = "my-config"
- }
-
- data {
- api_host = "myhost:443"
- db_host = "dbhost:5432"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `data` - (Optional) A map of the configuration data.
-* `metadata` - (Required) Standard config map's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the config map that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the config map. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the config map, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the config map must be unique.
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this config map that can be used by clients to determine when config map has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this config map.
-* `uid` - The unique in time and space value for this config map. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-## Import
-
-Config Map can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_config_map.example default/my-config
-```
diff --git a/website/source/docs/providers/kubernetes/r/horizontal_pod_autoscaler.html.markdown b/website/source/docs/providers/kubernetes/r/horizontal_pod_autoscaler.html.markdown
deleted file mode 100644
index 8eb67e91b..000000000
--- a/website/source/docs/providers/kubernetes/r/horizontal_pod_autoscaler.html.markdown
+++ /dev/null
@@ -1,82 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_horizontal_pod_autoscaler"
-sidebar_current: "docs-kubernetes-resource-horizontal-pod-autoscaler"
-description: |-
- Horizontal Pod Autoscaler automatically scales the number of pods in a replication controller, deployment or replica set based on observed CPU utilization.
----
-
-# kubernetes_horizontal_pod_autoscaler
-
-Horizontal Pod Autoscaler automatically scales the number of pods in a replication controller, deployment or replica set based on observed CPU utilization.
-
-
-## Example Usage
-
-```hcl
-resource "kubernetes_horizontal_pod_autoscaler" "example" {
- metadata {
- name = "terraform-example"
- }
- spec {
- max_replicas = 10
- min_replicas = 8
- scale_target_ref {
- kind = "ReplicationController"
- name = "MyApp"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard horizontal pod autoscaler's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Required) Behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the horizontal pod autoscaler that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the horizontal pod autoscaler. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the horizontal pod autoscaler, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the horizontal pod autoscaler must be unique.
-
-#### Attributes
-
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this horizontal pod autoscaler that can be used by clients to determine when horizontal pod autoscaler has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this horizontal pod autoscaler.
-* `uid` - The unique in time and space value for this horizontal pod autoscaler. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `spec`
-
-#### Arguments
-
-* `max_replicas` - (Required) Upper limit for the number of pods that can be set by the autoscaler.
-* `min_replicas` - (Optional) Lower limit for the number of pods that can be set by the autoscaler, defaults to `1`.
-* `scale_target_ref` - (Required) Reference to scaled resource. e.g. Replication Controller
-* `target_cpu_utilization_percentage` - (Optional) Target average CPU utilization (represented as a percentage of requested CPU) over all the pods. If not specified the default autoscaling policy will be used.
-
-### `scale_target_ref`
-
-#### Arguments
-
-* `api_version` - (Optional) API version of the referent
-* `kind` - (Required) Kind of the referent. e.g. `ReplicationController`. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds
-* `name` - (Required) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-## Import
-
-Horizontal Pod Autoscaler can be imported using the namespace and name, e.g.
-
-```
-$ terraform import kubernetes_horizontal_pod_autoscaler.example default/terraform-example
-```
diff --git a/website/source/docs/providers/kubernetes/r/limit_range.html.markdown b/website/source/docs/providers/kubernetes/r/limit_range.html.markdown
deleted file mode 100644
index ae456e95f..000000000
--- a/website/source/docs/providers/kubernetes/r/limit_range.html.markdown
+++ /dev/null
@@ -1,97 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_limit_range"
-sidebar_current: "docs-kubernetes-resource-limit-range"
-description: |-
- Limit Range sets resource usage limits (e.g. memory, cpu, storage) for supported kinds of resources in a namespace.
----
-
-# kubernetes_limit_range
-
-Limit Range sets resource usage limits (e.g. memory, cpu, storage) for supported kinds of resources in a namespace.
-
-Read more in [the official docs](https://kubernetes.io/docs/tasks/configure-pod-container/apply-resource-quota-limit/#applying-default-resource-requests-and-limits).
-
-
-## Example Usage
-
-```hcl
-resource "kubernetes_limit_range" "example" {
- metadata {
- name = "terraform-example"
- }
- spec {
- limit {
- type = "Pod"
- max {
- cpu = "200m"
- memory = "1024M"
- }
- }
- limit {
- type = "PersistentVolumeClaim"
- min {
- storage = "24M"
- }
- }
- limit {
- type = "Container"
- default {
- cpu = "50m"
- memory = "24M"
- }
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard limit range's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Optional) Spec defines the limits enforced. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
-
-## Nested Blocks
-
-### `spec`
-
-#### Arguments
-
-* `limit` - (Optional) The list of limits that are enforced.
-
-### `limit`
-
-#### Arguments
-
-* `default` - (Optional) Default resource requirement limit value by resource name if resource limit is omitted.
-* `default_request` - (Optional) The default resource requirement request value by resource name if resource request is omitted.
-* `max` - (Optional) Max usage constraints on this kind by resource name.
-* `max_limit_request_ratio` - (Optional) The named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
-* `min` - (Optional) Min usage constraints on this kind by resource name.
-* `type` - (Optional) Type of resource that this limit applies to. e.g. `Pod`, `Container` or `PersistentVolumeClaim`
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the limit range that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the limit range. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the limit range, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the limit range must be unique.
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this limit range that can be used by clients to determine when limit range has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this limit range.
-* `uid` - The unique in time and space value for this limit range. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-## Import
-
-Limit Range can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_limit_range.example default/terraform-example
-```
diff --git a/website/source/docs/providers/kubernetes/r/namespace.html.markdown b/website/source/docs/providers/kubernetes/r/namespace.html.markdown
deleted file mode 100644
index f236820c4..000000000
--- a/website/source/docs/providers/kubernetes/r/namespace.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_namespace"
-sidebar_current: "docs-kubernetes-resource-namespace"
-description: |-
- Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces.
----
-
-# kubernetes_namespace
-
-Kubernetes supports multiple virtual clusters backed by the same physical cluster. These virtual clusters are called namespaces.
-Read more about namespaces at https://kubernetes.io/docs/user-guide/namespaces/
-
-## Example Usage
-
-```hcl
-resource "kubernetes_namespace" "example" {
- metadata {
- annotations {
- name = "example-annotation"
- }
-
- labels {
- mylabel = "label-value"
- }
-
- name = "terraform-example-namespace"
- }
-}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard namespace's [metadata](https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata).
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the namespace that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more about [name idempotency](https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency).
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) namespaces. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the namespace, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this namespace that can be used by clients to determine when namespaces have changed. Read more about [concurrency control and consistency](https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency).
-* `self_link` - A URL representing this namespace.
-* `uid` - The unique in time and space value for this namespace. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-## Import
-
-Namespaces can be imported using their name, e.g.
-
-```
-$ terraform import kubernetes_namespace.n terraform-example-namespace
-```
diff --git a/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown b/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown
deleted file mode 100644
index 016333c43..000000000
--- a/website/source/docs/providers/kubernetes/r/persistent_volume.html.markdown
+++ /dev/null
@@ -1,256 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_persistent_volume"
-sidebar_current: "docs-kubernetes-resource-persistent-volume-x"
-description: |-
- A Persistent Volume (PV) is a piece of networked storage in the cluster that has been provisioned by an administrator.
----
-
-# kubernetes_persistent_volume
-
-The resource provides a piece of networked storage in the cluster provisioned by an administrator. It is a resource in the cluster just like a node is a cluster resource. Persistent Volumes have a lifecycle independent of any individual pod that uses the PV.
-
-More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
-
-## Example Usage
-
-```hcl
-resource "kubernetes_persistent_volume" "example" {
- metadata {
- name = "terraform-example"
- }
- spec {
- capacity {
- storage = "2Gi"
- }
- access_modes = ["ReadWriteMany"]
- persistent_volume_source {
- vsphere_volume {
- volume_path = "/absolute/path"
- }
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard persistent volume's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Required) Spec of the persistent volume owned by the cluster. See below.
-
-## Nested Blocks
-
-### `spec`
-
-#### Arguments
-
-* `access_modes` - (Required) Contains all ways the volume can be mounted. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes
-* `capacity` - (Required) A description of the persistent volume's resources and capacity. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#capacity
-* `persistent_volume_reclaim_policy` - (Optional) What happens to a persistent volume when released from its claim. Valid options are Retain (default) and Recycle. Recycling must be supported by the volume plugin underlying this persistent volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#recycling-policy
-* `persistent_volume_source` - (Required) The specification of a persistent volume.
-
-### `persistent_volume_source`
-
-#### Arguments
-
-* `aws_elastic_block_store` - (Optional) Represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `azure_disk` - (Optional) Represents an Azure Data Disk mount on the host and bind mount to the pod.
-* `azure_file` - (Optional) Represents an Azure File Service mount on the host and bind mount to the pod.
-* `ceph_fs` - (Optional) Represents a Ceph FS mount on the host that shares a pod's lifetime
-* `cinder` - (Optional) Represents a cinder volume attached and mounted on kubelets host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `fc` - (Optional) Represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
-* `flex_volume` - (Optional) Represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
-* `flocker` - (Optional) Represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
-* `gce_persistent_disk` - (Optional) Represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `glusterfs` - (Optional) Represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
-* `host_path` - (Optional) Represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath
-* `iscsi` - (Optional) Represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.
-* `nfs` - (Optional) Represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `photon_persistent_disk` - (Optional) Represents a PhotonController persistent disk attached and mounted on kubelets host machine
-* `quobyte` - (Optional) Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
-* `rbd` - (Optional) Represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
-* `vsphere_volume` - (Optional) Represents a vSphere volume attached and mounted on kubelets host machine
-
-
-### `aws_elastic_block_store`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
-* `read_only` - (Optional) Whether to set the read-only property in VolumeMounts to "true". If omitted, the default is "false". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `volume_id` - (Required) Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-
-### `azure_disk`
-
-#### Arguments
-
-* `caching_mode` - (Required) Host Caching mode: None, Read Only, Read Write.
-* `data_disk_uri` - (Required) The URI of the data disk in the blob storage
-* `disk_name` - (Required) The Name of the data disk in the blob storage
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-
-### `azure_file`
-
-#### Arguments
-
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-* `secret_name` - (Required) The name of secret that contains Azure Storage Account Name and Key
-* `share_name` - (Required) Share Name
-
-### `ceph_fs`
-
-#### Arguments
-
-* `monitors` - (Required) Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `path` - (Optional) Used as the mounted root, rather than the full Ceph tree, default is /
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to `false` (read/write). More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `secret_file` - (Optional) The path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `secret_ref` - (Optional) Reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `user` - (Optional) User is the rados user name, default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-
-### `cinder`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `volume_id` - (Required) Volume ID used to identify the volume in Cinder. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-
-### `fc`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `lun` - (Required) FC target lun number
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-* `target_ww_ns` - (Required) FC target worldwide names (WWNs)
-
-### `flex_volume`
-
-#### Arguments
-
-* `driver` - (Required) Driver is the name of the driver to use for this volume.
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
-* `options` - (Optional) Extra command options if any.
-* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false (read/write).
-* `secret_ref` - (Optional) Reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
-
-### `flocker`
-
-#### Arguments
-
-* `dataset_name` - (Optional) Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated
-* `dataset_uuid` - (Optional) UUID of the dataset. This is unique identifier of a Flocker dataset
-
-### `gce_persistent_disk`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `pd_name` - (Required) Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-
-### `glusterfs`
-
-#### Arguments
-
-* `endpoints_name` - (Required) The endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-* `path` - (Required) The Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-* `read_only` - (Optional) Whether to force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-
-### `host_path`
-
-#### Arguments
-
-* `path` - (Optional) Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath
-
-### `iscsi`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi
-* `iqn` - (Required) Target iSCSI Qualified Name.
-* `iscsi_interface` - (Optional) iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp).
-* `lun` - (Optional) iSCSI target lun number.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false.
-* `target_portal` - (Required) iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the persistent volume that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the persistent volume. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the persistent volume, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-#### Attributes
-
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this persistent volume that can be used by clients to determine when persistent volume has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this persistent volume.
-* `uid` - The unique in time and space value for this persistent volume. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `nfs`
-
-#### Arguments
-
-* `path` - (Required) Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `read_only` - (Optional) Whether to force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `server` - (Required) Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-
-### `photon_persistent_disk`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `pd_id` - (Required) ID that identifies Photon Controller persistent disk
-
-### `quobyte`
-
-#### Arguments
-
-* `group` - (Optional) Group to map volume access to. Default is no group
-* `read_only` - (Optional) Whether to force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
-* `registry` - (Required) Registry represents a single or multiple Quobyte Registry services specified as a string of host:port pairs (multiple entries are separated with commas) which acts as the central registry for volumes
-* `user` - (Optional) User to map volume access to. Defaults to the serviceaccount user
-* `volume` - (Required) Volume is a string that references an already created Quobyte volume by name.
-
-### `rbd`
-
-#### Arguments
-
-* `ceph_monitors` - (Required) A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd
-* `keyring` - (Optional) Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rados_user` - (Optional) The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rbd_image` - (Required) The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rbd_pool` - (Optional) The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `secret_ref` - (Optional) Name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-
-### `secret_ref`
-
-#### Arguments
-
-* `name` - (Optional) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `vsphere_volume`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `volume_path` - (Required) Path that identifies vSphere volume vmdk
-
-## Import
-
-Persistent Volume can be imported using its name, e.g.
-
-```
-$ terraform import kubernetes_persistent_volume.example terraform-example
-```
diff --git a/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown b/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown
deleted file mode 100644
index 375510292..000000000
--- a/website/source/docs/providers/kubernetes/r/persistent_volume_claim.html.markdown
+++ /dev/null
@@ -1,114 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_persistent_volume_claim"
-sidebar_current: "docs-kubernetes-resource-persistent-volume-claim"
-description: |-
- This resource allows the user to request and claim a persistent volume.
----
-
-# kubernetes_persistent_volume_claim
-
-This resource allows the user to request and claim a persistent volume.
-
-## Example Usage
-
-```hcl
-resource "kubernetes_persistent_volume_claim" "example" {
- metadata {
- name = "exampleclaimname"
- }
- spec {
- access_modes = ["ReadWriteMany"]
- resources {
- requests {
- storage = "5Gi"
- }
- }
- volume_name = "${kubernetes_persistent_volume.example.metadata.0.name}"
- }
-}
-
-resource "kubernetes_persistent_volume" "example" {
- metadata {
- name = "examplevolumename"
- }
- spec {
- capacity {
- storage = "10Gi"
- }
- access_modes = ["ReadWriteMany"]
- persistent_volume_source {
- gce_persistent_disk {
- pd_name = "test-123"
- }
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard persistent volume claim's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Required) Spec defines the desired characteristics of a volume requested by a pod author. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
-* `wait_until_bound` - (Optional) Whether to wait for the claim to reach `Bound` state (to find volume in which to claim the space)
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the persistent volume claim that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the persistent volume claim. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the persistent volume claim, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the persistent volume claim must be unique.
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this persistent volume claim that can be used by clients to determine when persistent volume claim has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this persistent volume claim.
-* `uid` - The unique in time and space value for this persistent volume claim. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `spec`
-
-#### Arguments
-
-* `access_modes` - (Required) A set of the desired access modes the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#access-modes-1
-* `resources` - (Required) A list of the minimum resources the volume should have. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources
-* `selector` - (Optional) A label query over volumes to consider for binding.
-* `volume_name` - (Optional) The binding reference to the PersistentVolume backing this claim.
-
-### `match_expressions`
-
-#### Arguments
-
-* `key` - (Optional) The label key that the selector applies to.
-* `operator` - (Optional) A key's relationship to a set of values. Valid operators are `In`, `NotIn`, `Exists` and `DoesNotExist`.
-* `values` - (Optional) An array of string values. If the operator is `In` or `NotIn`, the values array must be non-empty. If the operator is `Exists` or `DoesNotExist`, the values array must be empty. This array is replaced during a strategic merge patch.
-
-
-### `resources`
-
-#### Arguments
-
-* `limits` - (Optional) Map describing the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/
-* `requests` - (Optional) Map describing the minimum amount of compute resources required. If this is omitted for a container, it defaults to `limits` if that is explicitly specified, otherwise to an implementation-defined value. More info: http://kubernetes.io/docs/user-guide/compute-resources/
-
-### `selector`
-
-#### Arguments
-
-* `match_expressions` - (Optional) A list of label selector requirements. The requirements are ANDed.
-* `match_labels` - (Optional) A map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of `match_expressions`, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
-
-## Import
-
-Persistent Volume Claim can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_persistent_volume_claim.example default/example-name
-```
diff --git a/website/source/docs/providers/kubernetes/r/pod.html.markdown b/website/source/docs/providers/kubernetes/r/pod.html.markdown
deleted file mode 100644
index e84a26a19..000000000
--- a/website/source/docs/providers/kubernetes/r/pod.html.markdown
+++ /dev/null
@@ -1,539 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_pod"
-sidebar_current: "docs-kubernetes-resource-pod"
-description: |-
- A pod is a group of one or more containers, the shared storage for those containers, and options about how to run the containers. Pods are always co-located and co-scheduled, and run in a shared context.
----
-
-# kubernetes_pod
-
-A pod is a group of one or more containers, the shared storage for those containers, and options about how to run the containers. Pods are always co-located and co-scheduled, and run in a shared context.
-
-Read more at https://kubernetes.io/docs/concepts/workloads/pods/pod/
-
-## Example Usage
-
-```hcl
-resource "kubernetes_pod" "test" {
- metadata {
- name = "terraform-example"
- }
-
- spec {
- container {
- image = "nginx:1.7.9"
- name = "example"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard pod's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Required) Spec of the pod owned by the cluster
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the pod that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the pod. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the pod, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the pod must be unique.
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this pod that can be used by clients to determine when pod has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this pod.
-* `uid` - The unique in time and space value for this pod. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `spec`
-
-#### Arguments
-
-* `active_deadline_seconds` - (Optional) Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.
-* `container` - (Optional) List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers
-* `dns_policy` - (Optional) Set DNS policy for containers within the pod. One of 'ClusterFirst' or 'Default'. Defaults to 'ClusterFirst'.
-* `host_ipc` - (Optional) Use the host's ipc namespace. Optional: Default to false.
-* `host_network` - (Optional) Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified.
-* `host_pid` - (Optional) Use the host's pid namespace.
-* `hostname` - (Optional) Specifies the hostname of the Pod. If not specified, the pod's hostname will be set to a system-defined value.
-* `image_pull_secrets` - (Optional) ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: http://kubernetes.io/docs/user-guide/images#specifying-imagepullsecrets-on-a-pod
-* `node_name` - (Optional) NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.
-* `node_selector` - (Optional) NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: http://kubernetes.io/docs/user-guide/node-selection.
-* `restart_policy` - (Optional) Restart policy for all containers within the pod. One of Always, OnFailure, Never. More info: http://kubernetes.io/docs/user-guide/pod-states#restartpolicy.
-* `security_context` - (Optional) SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty
-* `service_account_name` - (Optional) ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: http://releases.k8s.io/HEAD/docs/design/service_accounts.md.
-* `subdomain` - (Optional) If specified, the fully qualified Pod hostname will be "...svc.". If not specified, the pod will not have a domain name at all.
-* `termination_grace_period_seconds` - (Optional) Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process.
-* `volume` - (Optional) List of volumes that can be mounted by containers belonging to the pod. More info: http://kubernetes.io/docs/user-guide/volumes
-
-### `container`
-
-#### Arguments
-
-* `args` - (Optional) Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands
-* `command` - (Optional) Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/containers#containers-and-commands
-* `env` - (Optional) List of environment variables to set in the container. Cannot be updated.
-* `image` - (Optional) Docker image name. More info: http://kubernetes.io/docs/user-guide/images
-* `image_pull_policy` - (Optional) Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/images#updating-images
-* `lifecycle` - (Optional) Actions that the management system should take in response to container lifecycle events
-* `liveness_probe` - (Optional) Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-* `name` - (Required) Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.
-* `port` - (Optional) List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default "0.0.0.0" address inside a container will be accessible from the network. Cannot be updated.
-* `readiness_probe` - (Optional) Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-* `resources` - (Optional) Compute Resources required by this container. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#resources
-* `security_context` - (Optional) Security options the pod should run with. More info: http://releases.k8s.io/HEAD/docs/design/security_context.md
-* `stdin` - (Optional) Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF.
-* `stdin_once` - (Optional) Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF.
-* `termination_message_path` - (Optional) Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Defaults to /dev/termination-log. Cannot be updated.
-* `tty` - (Optional) Whether this container should allocate a TTY for itself
-* `volume_mount` - (Optional) Pod volumes to mount into the container's filesystem. Cannot be updated.
-* `working_dir` - (Optional) Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.
-
-### `aws_elastic_block_store`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
-* `read_only` - (Optional) Whether to set the read-only property in VolumeMounts to "true". If omitted, the default is "false". More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `volume_id` - (Required) Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-
-### `azure_disk`
-
-#### Arguments
-
-* `caching_mode` - (Required) Host Caching mode: None, Read Only, Read Write.
-* `data_disk_uri` - (Required) The URI the data disk in the blob storage
-* `disk_name` - (Required) The Name of the data disk in the blob storage
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-
-### `azure_file`
-
-#### Arguments
-
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-* `secret_name` - (Required) The name of secret that contains Azure Storage Account Name and Key
-* `share_name` - (Required) Share Name
-
-### `capabilities`
-
-#### Arguments
-
-* `add` - (Optional) Added capabilities
-* `drop` - (Optional) Removed capabilities
-
-### `ceph_fs`
-
-#### Arguments
-
-* `monitors` - (Required) Monitors is a collection of Ceph monitors More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `path` - (Optional) Used as the mounted root, rather than the full Ceph tree, default is /
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to `false` (read/write). More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `secret_file` - (Optional) The path to key ring for User, default is /etc/ceph/user.secret More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `secret_ref` - (Optional) Reference to the authentication secret for User, default is empty. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-* `user` - (Optional) User is the rados user name, default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it
-
-### `cinder`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write). More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `volume_id` - (Required) Volume ID used to identify the volume in Cinder. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-
-### `config_map`
-
-#### Arguments
-
-* `default_mode` - (Optional) Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
-* `items` - (Optional) If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.
-* `name` - (Optional) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `config_map_key_ref`
-
-#### Arguments
-
-* `key` - (Optional) The key to select.
-* `name` - (Optional) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `downward_api`
-
-#### Arguments
-
-* `default_mode` - (Optional) Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
-* `items` - (Optional) If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.
-
-### `empty_dir`
-
-#### Arguments
-
-* `medium` - (Optional) What type of storage medium should back this directory. The default is "" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
-
-### `env`
-
-#### Arguments
-
-* `name` - (Required) Name of the environment variable. Must be a C_IDENTIFIER
-* `value` - (Optional) Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to "".
-* `value_from` - (Optional) Source for the environment variable's value
-
-### `exec`
-
-#### Arguments
-
-* `command` - (Optional) Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
-
-### `fc`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `lun` - (Required) FC target lun number
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false (read/write).
-* `target_ww_ns` - (Required) FC target worldwide names (WWNs)
-
-### `field_ref`
-
-#### Arguments
-
-* `api_version` - (Optional) Version of the schema the FieldPath is written in terms of, defaults to "v1".
-* `field_path` - (Optional) Path of the field to select in the specified API version
-
-### `flex_volume`
-
-#### Arguments
-
-* `driver` - (Required) Driver is the name of the driver to use for this volume.
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
-* `options` - (Optional) Extra command options if any.
-* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false (read/write).
-* `secret_ref` - (Optional) Reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.
-
-### `flocker`
-
-#### Arguments
-
-* `dataset_name` - (Optional) Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated
-* `dataset_uuid` - (Optional) UUID of the dataset. This is unique identifier of a Flocker dataset
-
-### `gce_persistent_disk`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `partition` - (Optional) The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as "1". Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `pd_name` - (Required) Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `read_only` - (Optional) Whether to force the ReadOnly setting in VolumeMounts. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-
-### `git_repo`
-
-#### Arguments
-
-* `directory` - (Optional) Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.
-* `repository` - (Optional) Repository URL
-* `revision` - (Optional) Commit hash for the specified revision.
-
-### `glusterfs`
-
-#### Arguments
-
-* `endpoints_name` - (Required) The endpoint name that details Glusterfs topology. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-* `path` - (Required) The Glusterfs volume path. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-* `read_only` - (Optional) Whether to force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod
-
-### `host_path`
-
-#### Arguments
-
-* `path` - (Optional) Path of the directory on the host. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath
-
-### `http_get`
-
-#### Arguments
-
-* `host` - (Optional) Host name to connect to, defaults to the pod IP. You probably want to set "Host" in httpHeaders instead.
-* `http_header` - (Optional) Custom headers to set in the request. HTTP allows repeated headers.
-* `path` - (Optional) Path to access on the HTTP server.
-* `port` - (Optional) Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
-* `scheme` - (Optional) Scheme to use for connecting to the host.
-
-### `http_header`
-
-#### Arguments
-
-* `name` - (Optional) The header field name
-* `value` - (Optional) The header field value
-
-### `image_pull_secrets`
-
-#### Arguments
-
-* `name` - (Required) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `iscsi`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#iscsi
-* `iqn` - (Required) Target iSCSI Qualified Name.
-* `iscsi_interface` - (Optional) iSCSI interface name that uses an iSCSI transport. Defaults to 'default' (tcp).
-* `lun` - (Optional) iSCSI target lun number.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false.
-* `target_portal` - (Required) iSCSI target portal. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).
-
-### `items`
-
-#### Arguments
-
-* `key` - (Optional) The key to project.
-* `mode` - (Optional) Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.
-* `path` - (Optional) The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.
-
-### `lifecycle`
-
-#### Arguments
-
-* `post_start` - (Optional) post_start is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details
-* `pre_stop` - (Optional) pre_stop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: http://kubernetes.io/docs/user-guide/container-environment#hook-details
-
-### `limits`
-
-#### Arguments
-
-* `cpu` - (Optional) CPU
-* `memory` - (Optional) Memory
-
-### `liveness_probe`
-
-#### Arguments
-
-* `exec` - (Optional) exec specifies the action to take.
-* `failure_threshold` - (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded.
-* `http_get` - (Optional) Specifies the http request to perform.
-* `initial_delay_seconds` - (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-* `period_seconds` - (Optional) How often (in seconds) to perform the probe
-* `success_threshold` - (Optional) Minimum consecutive successes for the probe to be considered successful after having failed.
-* `tcp_socket` - (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
-* `timeout_seconds` - (Optional) Number of seconds after which the probe times out. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-
-### `nfs`
-
-#### Arguments
-
-* `path` - (Required) Path that is exported by the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `read_only` - (Optional) Whether to force the NFS export to be mounted with read-only permissions. Defaults to false. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `server` - (Required) Server is the hostname or IP address of the NFS server. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-
-### `persistent_volume_claim`
-
-#### Arguments
-
-* `claim_name` - (Optional) ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: http://kubernetes.io/docs/user-guide/persistent-volumes#persistentvolumeclaims
-* `read_only` - (Optional) Will force the ReadOnly setting in VolumeMounts.
-
-### `photon_persistent_disk`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `pd_id` - (Required) ID that identifies Photon Controller persistent disk
-
-### `port`
-
-#### Arguments
-
-* `container_port` - (Required) Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.
-* `host_ip` - (Optional) What host IP to bind the external port to.
-* `host_port` - (Optional) Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.
-* `name` - (Optional) If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services
-* `protocol` - (Optional) Protocol for port. Must be UDP or TCP. Defaults to "TCP".
-
-### `post_start`
-
-#### Arguments
-
-* `exec` - (Optional) exec specifies the action to take.
-* `http_get` - (Optional) Specifies the http request to perform.
-* `tcp_socket` - (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
-
-### `pre_stop`
-
-#### Arguments
-
-* `exec` - (Optional) exec specifies the action to take.
-* `http_get` - (Optional) Specifies the http request to perform.
-* `tcp_socket` - (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
-
-### `quobyte`
-
-#### Arguments
-
-* `group` - (Optional) Group to map volume access to Default is no group
-* `read_only` - (Optional) Whether to force the Quobyte volume to be mounted with read-only permissions. Defaults to false.
-* `registry` - (Required) Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes
-* `user` - (Optional) User to map volume access to. Defaults to the serviceaccount user.
-* `volume` - (Required) Volume is a string that references an already created Quobyte volume by name.
-
-### `rbd`
-
-#### Arguments
-
-* `ceph_monitors` - (Required) A collection of Ceph monitors. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `fs_type` - (Optional) Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. More info: http://kubernetes.io/docs/user-guide/volumes#rbd
-* `keyring` - (Optional) Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rados_user` - (Optional) The rados user name. Default is admin. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rbd_image` - (Required) The rados image name. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `rbd_pool` - (Optional) The rados pool name. Default is rbd. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it.
-* `read_only` - (Optional) Whether to force the read-only setting in VolumeMounts. Defaults to false. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-* `secret_ref` - (Optional) Name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it
-
-### `readiness_probe`
-
-#### Arguments
-
-* `exec` - (Optional) exec specifies the action to take.
-* `failure_threshold` - (Optional) Minimum consecutive failures for the probe to be considered failed after having succeeded.
-* `http_get` - (Optional) Specifies the http request to perform.
-* `initial_delay_seconds` - (Optional) Number of seconds after the container has started before liveness probes are initiated. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-* `period_seconds` - (Optional) How often (in seconds) to perform the probe
-* `success_threshold` - (Optional) Minimum consecutive successes for the probe to be considered successful after having failed.
-* `tcp_socket` - (Optional) TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported
-* `timeout_seconds` - (Optional) Number of seconds after which the probe times out. More info: http://kubernetes.io/docs/user-guide/pod-states#container-probes
-
-### `resources`
-
-#### Arguments
-
-* `limits` - (Optional) Describes the maximum amount of compute resources allowed. More info: http://kubernetes.io/docs/user-guide/compute-resources/
-* `requests` - (Optional) Describes the minimum amount of compute resources required.
-
-### `requests`
-
-#### Arguments
-
-* `cpu` - (Optional) CPU
-* `memory` - (Optional) Memory
-
-### `resource_field_ref`
-
-#### Arguments
-
-* `container_name` - (Optional) The name of the container
-* `resource` - (Required) Resource to select
-
-### `se_linux_options`
-
-#### Arguments
-
-* `level` - (Optional) Level is SELinux level label that applies to the container.
-* `role` - (Optional) Role is a SELinux role label that applies to the container.
-* `type` - (Optional) Type is a SELinux type label that applies to the container.
-* `user` - (Optional) User is a SELinux user label that applies to the container.
-
-### `secret`
-
-#### Arguments
-
-* `secret_name` - (Optional) Name of the secret in the pod's namespace to use. More info: http://kubernetes.io/docs/user-guide/volumes#secrets
-
-### `secret_key_ref`
-
-#### Arguments
-
-* `key` - (Optional) The key of the secret to select from. Must be a valid secret key.
-* `name` - (Optional) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `secret_ref`
-
-#### Arguments
-
-* `name` - (Optional) Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-
-### `security_context`
-
-#### Arguments
-
-* `fs_group` - (Optional) A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod: 1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw---- If unset, the Kubelet will not modify the ownership and permissions of any volume.
-* `run_as_non_root` - (Optional) Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does.
-* `run_as_user` - (Optional) The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified
-* `se_linux_options` - (Optional) The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.
-* `supplemental_groups` - (Optional) A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.
-
-### `tcp_socket`
-
-#### Arguments
-
-* `port` - (Required) Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.
-
-### `value_from`
-
-#### Arguments
-
-* `config_map_key_ref` - (Optional) Selects a key of a ConfigMap.
-* `field_ref` - (Optional) Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..
-* `resource_field_ref` - (Optional) Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..
-* `secret_key_ref` - (Optional) Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.podIP..
-
-### `volume`
-
-#### Arguments
-
-* `aws_elastic_block_store` - (Optional) Represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://kubernetes.io/docs/user-guide/volumes#awselasticblockstore
-* `azure_disk` - (Optional) Represents an Azure Data Disk mount on the host and bind mount to the pod.
-* `azure_file` - (Optional) Represents an Azure File Service mount on the host and bind mount to the pod.
-* `ceph_fs` - (Optional) Represents a Ceph FS mount on the host that shares a pod's lifetime
-* `cinder` - (Optional) Represents a cinder volume attached and mounted on kubelets host machine. More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md
-* `config_map` - (Optional) ConfigMap represents a configMap that should populate this volume
-* `downward_api` - (Optional) DownwardAPI represents downward API about the pod that should populate this volume
-* `empty_dir` - (Optional) EmptyDir represents a temporary directory that shares a pod's lifetime. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir
-* `fc` - (Optional) Represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.
-* `flex_volume` - (Optional) Represents a generic volume resource that is provisioned/attached using an exec based plugin. This is an alpha feature and may change in future.
-* `flocker` - (Optional) Represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running
-* `gce_persistent_disk` - (Optional) Represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#gcepersistentdisk
-* `git_repo` - (Optional) GitRepo represents a git repository at a particular revision.
-* `glusterfs` - (Optional) Represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: http://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md
-* `host_path` - (Optional) Represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: http://kubernetes.io/docs/user-guide/volumes#hostpath
-* `iscsi` - (Optional) Represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.
-* `name` - (Optional) Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `nfs` - (Optional) Represents an NFS mount on the host. Provisioned by an admin. More info: http://kubernetes.io/docs/user-guide/volumes#nfs
-* `persistent_volume_claim` - (Optional) The specification of a persistent volume.
-* `photon_persistent_disk` - (Optional) Represents a PhotonController persistent disk attached and mounted on kubelets host machine
-* `quobyte` - (Optional) Quobyte represents a Quobyte mount on the host that shares a pod's lifetime
-* `rbd` - (Optional) Represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/volumes/rbd/README.md
-* `secret` - (Optional) Secret represents a secret that should populate this volume. More info: http://kubernetes.io/docs/user-guide/volumes#secrets
-* `vsphere_volume` - (Optional) Represents a vSphere volume attached and mounted on kubelets host machine
-
-### `volume_mount`
-
-#### Arguments
-
-* `mount_path` - (Required) Path within the container at which the volume should be mounted. Must not contain ':'.
-* `name` - (Required) This must match the Name of a Volume.
-* `read_only` - (Optional) Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.
-* `sub_path` - (Optional) Path within the volume from which the container's volume should be mounted. Defaults to "" (volume's root).
-
-### `vsphere_volume`
-
-#### Arguments
-
-* `fs_type` - (Optional) Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
-* `volume_path` - (Required) Path that identifies vSphere volume vmdk
-
-## Import
-
-Pod can be imported using the namespace and name, e.g.
-
-```
-$ terraform import kubernetes_pod.example default/terraform-example
-```
diff --git a/website/source/docs/providers/kubernetes/r/resource_quota.html.markdown b/website/source/docs/providers/kubernetes/r/resource_quota.html.markdown
deleted file mode 100644
index a54e0356d..000000000
--- a/website/source/docs/providers/kubernetes/r/resource_quota.html.markdown
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_resource_quota"
-sidebar_current: "docs-kubernetes-resource-resource-quota"
-description: |-
- A resource quota provides constraints that limit aggregate resource consumption per namespace. It can limit the quantity of objects that can be created in a namespace by type, as well as the total amount of compute resources that may be consumed by resources in that project.
----
-
-# kubernetes_resource_quota
-
-A resource quota provides constraints that limit aggregate resource consumption per namespace. It can limit the quantity of objects that can be created in a namespace by type, as well as the total amount of compute resources that may be consumed by resources in that project.
-
-
-## Example Usage
-
-```hcl
-resource "kubernetes_resource_quota" "example" {
- metadata {
- name = "terraform-example"
- }
- spec {
- hard {
- pods = 10
- }
- scopes = ["BestEffort"]
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard resource quota's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Optional) Spec defines the desired quota. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the resource quota that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the resource quota. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the resource quota, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the resource quota must be unique.
-
-#### Attributes
-
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this resource quota that can be used by clients to determine when resource quota has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this resource quota.
-* `uid` - The unique in time and space value for this resource quota. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `spec`
-
-#### Arguments
-
-* `hard` - (Optional) The set of desired hard limits for each named resource. More info: http://releases.k8s.io/HEAD/docs/design/admission_control_resource_quota.md#admissioncontrol-plugin-resourcequota
-* `scopes` - (Optional) A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.
-
-## Import
-
-Resource Quota can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_resource_quota.example default/terraform-example
-```
\ No newline at end of file
diff --git a/website/source/docs/providers/kubernetes/r/secret.html.markdown b/website/source/docs/providers/kubernetes/r/secret.html.markdown
deleted file mode 100644
index 5a0217f54..000000000
--- a/website/source/docs/providers/kubernetes/r/secret.html.markdown
+++ /dev/null
@@ -1,85 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_secret"
-sidebar_current: "docs-kubernetes-resource-secret"
-description: |-
- The resource provides mechanisms to inject containers with sensitive information while keeping containers agnostic of Kubernetes.
----
-
-# kubernetes_secret
-
-The resource provides mechanisms to inject containers with sensitive information, such as passwords, while keeping containers agnostic of Kubernetes.
-Secrets can be used to store sensitive information either as individual properties or coarse-grained entries like entire files or JSON blobs.
-The resource will by default create a secret which is available to any pod in the specified (or default) namespace.
-
-~> Read more about security properties and risks involved with using Kubernetes secrets: https://kubernetes.io/docs/user-guide/secrets/#security-properties
-
-~> **Note:** All arguments including the secret data will be stored in the raw state as plain-text. [Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "kubernetes_secret" "example" {
- metadata {
- name = "basic-auth"
- }
-
- data {
- username = "admin"
- password = "P4ssw0rd"
- }
-
- type = "kubernetes.io/basic-auth"
-}
-```
-
-## Example Usage (Docker config)
-
-```hcl
-resource "kubernetes_secret" "example" {
- metadata {
- name = "docker-cfg"
- }
-
- data {
- ".dockercfg" = "${file("${path.module}/.docker/config.json")}"
- }
-
- type = "kubernetes.io/dockercfg"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `data` - (Optional) A map of the secret data.
-* `metadata` - (Required) Standard secret's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `type` - (Optional) The secret type. Defaults to `Opaque`. More info: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/secrets.md#proposed-design
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the secret that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the secret. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the secret, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the secret must be unique.
-
-#### Attributes
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this secret that can be used by clients to determine when secret has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this secret.
-* `uid` - The unique in time and space value for this secret. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-## Import
-
-Secret can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_secret.example default/my-secret
-```
diff --git a/website/source/docs/providers/kubernetes/r/service.html.markdown b/website/source/docs/providers/kubernetes/r/service.html.markdown
deleted file mode 100644
index 04e048cdb..000000000
--- a/website/source/docs/providers/kubernetes/r/service.html.markdown
+++ /dev/null
@@ -1,93 +0,0 @@
----
-layout: "kubernetes"
-page_title: "Kubernetes: kubernetes_service"
-sidebar_current: "docs-kubernetes-resource-service"
-description: |-
- A Service is an abstraction which defines a logical set of pods and a policy by which to access them - sometimes called a micro-service.
----
-
-# kubernetes_service
-
-A Service is an abstraction which defines a logical set of pods and a policy by which to access them - sometimes called a micro-service.
-
-
-## Example Usage
-
-```hcl
-resource "kubernetes_service" "example" {
- metadata {
- name = "terraform-example"
- }
- spec {
- selector {
- App = "MyApp"
- }
- session_affinity = "ClientIP"
- port {
- port = 8080
- target_port = 80
- }
-
- type = "LoadBalancer"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `metadata` - (Required) Standard service's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#metadata
-* `spec` - (Required) Spec defines the behavior of a service. http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status
-
-## Nested Blocks
-
-### `metadata`
-
-#### Arguments
-
-* `annotations` - (Optional) An unstructured key value map stored with the service that may be used to store arbitrary metadata. More info: http://kubernetes.io/docs/user-guide/annotations
-* `generate_name` - (Optional) Prefix, used by the server, to generate a unique name ONLY IF the `name` field has not been provided. This value will also be combined with a unique suffix. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#idempotency
-* `labels` - (Optional) Map of string keys and values that can be used to organize and categorize (scope and select) the service. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels
-* `name` - (Optional) Name of the service, must be unique. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names
-* `namespace` - (Optional) Namespace defines the space within which name of the service must be unique.
-
-#### Attributes
-
-
-* `generation` - A sequence number representing a specific generation of the desired state.
-* `resource_version` - An opaque value that represents the internal version of this service that can be used by clients to determine when service has changed. Read more: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#concurrency-control-and-consistency
-* `self_link` - A URL representing this service.
-* `uid` - The unique in time and space value for this service. More info: http://kubernetes.io/docs/user-guide/identifiers#uids
-
-### `spec`
-
-#### Arguments
-
-* `cluster_ip` - (Optional) The IP address of the service. It is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. `None` can be specified for headless services when proxying is not required. Ignored if type is `ExternalName`. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
-* `external_ips` - (Optional) A list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.
-* `external_name` - (Optional) The external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid DNS name and requires `type` to be `ExternalName`.
-* `load_balancer_ip` - (Optional) Only applies to `type = LoadBalancer`. LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying this field when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.
-* `load_balancer_source_ranges` - (Optional) If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature. More info: http://kubernetes.io/docs/user-guide/services-firewalls
-* `port` - (Required) The list of ports that are exposed by this service. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
-* `selector` - (Optional) Route service traffic to pods with label keys and values matching this selector. Only applies to types `ClusterIP`, `NodePort`, and `LoadBalancer`. More info: http://kubernetes.io/docs/user-guide/services#overview
-* `session_affinity` - (Optional) Used to maintain session affinity. Supports `ClientIP` and `None`. Defaults to `None`. More info: http://kubernetes.io/docs/user-guide/services#virtual-ips-and-service-proxies
-* `type` - (Optional) Determines how the service is exposed. Defaults to `ClusterIP`. Valid options are `ExternalName`, `ClusterIP`, `NodePort`, and `LoadBalancer`. `ExternalName` maps to the specified `external_name`. More info: http://kubernetes.io/docs/user-guide/services#overview
-
-### `port`
-
-#### Arguments
-
-* `name` - (Optional) The name of this port within the service. All ports within the service must have unique names. Optional if only one ServicePort is defined on this service.
-* `node_port` - (Optional) The port on each node on which this service is exposed when `type` is `NodePort` or `LoadBalancer`. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the `type` of this service requires one. More info: http://kubernetes.io/docs/user-guide/services#type--nodeport
-* `port` - (Required) The port that will be exposed by this service.
-* `protocol` - (Optional) The IP protocol for this port. Supports `TCP` and `UDP`. Default is `TCP`.
-* `target_port` - (Required) Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. This field is ignored for services with `cluster_ip = "None"`. More info: http://kubernetes.io/docs/user-guide/services#defining-a-service
-
-## Import
-
-Service can be imported using its namespace and name, e.g.
-
-```
-$ terraform import kubernetes_service.example default/terraform-name
-```
diff --git a/website/source/docs/providers/librato/index.html.markdown b/website/source/docs/providers/librato/index.html.markdown
deleted file mode 100644
index b80d83081..000000000
--- a/website/source/docs/providers/librato/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "librato"
-page_title: "Provider: Librato"
-sidebar_current: "docs-librato-index"
-description: |-
- The Librato provider is used to interact with the resources supported by Librato. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Librato Provider
-
-The Librato provider is used to interact with the
-resources supported by Librato. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Librato provider
-provider "librato" {
- email = "ops@company.com"
- token = "${var.librato_token}"
-}
-
-# Create a new space
-resource "librato_space" "default" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `token` - (Required) Librato API token. It must be provided, but it can also
- be sourced from the `LIBRATO_TOKEN` environment variable.
-* `email` - (Required) Librato email address. It must be provided, but it can
- also be sourced from the `LIBRATO_EMAIL` environment variable.
diff --git a/website/source/docs/providers/librato/r/alert.html.markdown b/website/source/docs/providers/librato/r/alert.html.markdown
deleted file mode 100644
index 85dc6a7bf..000000000
--- a/website/source/docs/providers/librato/r/alert.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "librato"
-page_title: "Librato: librato_alert"
-sidebar_current: "docs-librato-resource-alert"
-description: |-
- Provides a Librato Alert resource. This can be used to create and manage alerts on Librato.
----
-
-# librato\_alert
-
-Provides a Librato Alert resource. This can be used to
-create and manage alerts on Librato.
-
-## Example Usage
-
-```hcl
-# Create a new Librato alert
-resource "librato_alert" "myalert" {
- name = "MyAlert"
- description = "A Test Alert"
- services = ["${librato_service.myservice.id}"]
-
- condition {
- type = "above"
- threshold = 10
- metric_name = "librato.cpu.percent.idle"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the alert.
-* `description` - (Required) Description of the alert.
-* `active` - whether the alert is active (can be triggered). Defaults to true.
-* `rearm_seconds` - minimum amount of time between sending alert notifications, in seconds.
-* `services` - list of notification service IDs.
-* `condition` - A trigger condition for the alert. Conditions documented below.
-* `attributes` - A hash of additional attribtues for the alert. Attributes documented below.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the alert.
-* `name` - The name of the alert.
-* `description` - (Required) Description of the alert.
-* `active` - whether the alert is active (can be triggered). Defaults to true.
-* `rearm_seconds` - minimum amount of time between sending alert notifications, in seconds.
-* `services` - list of notification service IDs.
-* `condition` - A trigger condition for the alert. Conditions documented below.
-
-Conditions (`condition`) support the following:
-
-* `type` - The type of condition. Must be one of `above`, `below` or `absent`.
-* `metric_name`- The name of the metric this alert condition applies to.
-* `source`- A source expression which identifies which sources for the given metric to monitor.
-* `detect_reset` - boolean: toggles the method used to calculate the delta from the previous sample when the summary_function is `derivative`.
-* `duration` - number of seconds condition must be true to fire the alert (required for type `absent`).
-* `threshold` - float: measurements over this number will fire the alert (only for `above` or `below`).
-* `summary_function` - Indicates which statistic of an aggregated measurement to alert on. ((only for `above` or `below`).
-
-Attributes (`attributes`) support the following:
-
-* `runbook_url` - a URL for the runbook to be followed when this alert is firing. Used in the Librato UI if set.
diff --git a/website/source/docs/providers/librato/r/metric.html.markdown b/website/source/docs/providers/librato/r/metric.html.markdown
deleted file mode 100644
index 2f2ba9f7f..000000000
--- a/website/source/docs/providers/librato/r/metric.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "librato"
-page_title: "Librato: librato_metric"
-sidebar_current: "docs-librato-resource-metric"
-description: |-
- Provides a Librato Metric resource. This can be used to create and manage metrics on Librato.
----
-
-# librato\_metric
-
-Provides a Librato Metric resource. This can be used to create and manage metrics on Librato.
-
-## Example Usage
-
-```hcl
-# Create a new Librato metric
-resource "librato_metric" "mymetric" {
- name = "MyMetric"
- type = "counter"
- description = "A Test Metric"
- attributes {
- display_stacked = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `type` - (Required) The type of metric to create (gauge, counter, or composite).
-* `name` - (Required) The unique identifier of the metric.
-* `display_name` - The name which will be used for the metric when viewing the Metrics website.
-* `description` - Text that can be used to explain precisely what the metric is measuring.
-* `period` - Number of seconds that is the standard reporting period of the metric.
-* `attributes` - The attributes hash configures specific components of a metric’s visualization.
-* `composite` - The definition of the composite metric.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The identifier for the metric.
-* `display_name` - The name which will be used for the metric when viewing the Metrics website.
-* `type` - The type of metric to create (gauge, counter, or composite).
-* `description` - Text that describes precisely what the metric is measuring.
-* `period` - Number of seconds that is the standard reporting period of the metric. Setting the period enables Metrics to detect abnormal interruptions in reporting and aids in analytics. For gauge metrics that have service-side aggregation enabled, this option will define the period that aggregation occurs on.
-* `source_lag` -
-* `composite` - The composite definition. Only used when type is composite.
-
-Attributes (`attributes`) support the following:
-
-* `color` - Sets a default color to prefer when visually rendering the metric. Must be a seven character string that represents the hex code of the color e.g. #52D74C.
-* `display_max` - If a metric has a known theoretical maximum value, set display_max so that visualizations can provide perspective of the current values relative to the maximum value.
-* `display_min` - If a metric has a known theoretical minimum value, set display_min so that visualizations can provide perspective of the current values relative to the minimum value.
-* `display_units_long` - A string that identifies the unit of measurement e.g. Microseconds. Typically the long form of display_units_short and used in visualizations e.g. the Y-axis label on a graph.
-* `display_units_short` - A terse (usually abbreviated) string that identifies the unit of measurement e.g. uS (Microseconds). Typically the short form of display_units_long and used in visualizations e.g. the tooltip for a point on a graph.
-* `display_stacked` - A boolean value indicating whether or not multiple metric streams should be aggregated in a visualization (e.g. stacked graphs). By default counters have display_stacked enabled while gauges have it disabled.
-* `summarize_function` - Determines how to calculate values when rolling up from raw values to higher resolution intervals. Must be one of: ‘average’, 'sum’, 'count’, 'min’, 'max’. If summarize_function is not set the behavior defaults to average.
-
-If the values of the measurements to be rolled up are: 2, 10, 5:
-
-* average: 5.67
-* sum: 17
-* count: 3
-* min: 2
-* max: 10
-
-* `aggregate` - Enable service-side aggregation for this metric. When enabled, measurements sent using the same tag set will be aggregated into single measurements on an interval defined by the period of the metric. If there is no period defined for the metric then all measurements will be aggregated on a 60-second interval.
-
-This option takes a value of true or false. If this option is not set for a metric it will default to false.
diff --git a/website/source/docs/providers/librato/r/service.html.markdown b/website/source/docs/providers/librato/r/service.html.markdown
deleted file mode 100644
index 1e579bf77..000000000
--- a/website/source/docs/providers/librato/r/service.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "librato"
-page_title: "Librato: librato_service"
-sidebar_current: "docs-librato-resource-service"
-description: |-
- Provides a Librato service resource. This can be used to create and manage notification services on Librato.
----
-
-# librato\_service
-
-Provides a Librato Service resource. This can be used to
-create and manage notification services on Librato.
-
-## Example Usage
-
-```hcl
-# Create a new Librato service
-resource "librato_service" "email" {
- title = "Email the admins"
- type = "mail"
-
- settings = < **Note** Terraform primarily deals with remote resources which are able
-to outlive a single Terraform run, and so local resources can sometimes violate
-its assumptions. The resources here are best used with care, since depending
-on local state can make it hard to apply the same Terraform configuration on
-many different local systems where the local resources may not be universally
-available. See specific notes in each resource for more information.
diff --git a/website/source/docs/providers/local/r/file.html.md b/website/source/docs/providers/local/r/file.html.md
deleted file mode 100644
index 83ac2a325..000000000
--- a/website/source/docs/providers/local/r/file.html.md
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "local"
-page_title: "Local: local_file"
-sidebar_current: "docs-local-resource-file"
-description: |-
- Generates a local file from content.
----
-
-# local_file
-
-Generates a local file with the given content.
-
-~> **Note** When working with local files, Terraform will detect the resource
-as having been deleted each time a configuration is applied on a new machine
-where the file is not present and will generate a diff to re-create it. This
-may cause "noise" in diffs in environments where configurations are routinely
-applied by many different users or within automation systems.
-
-## Example Usage
-
-```hcl
-resource "local_file" "foo" {
- content = "foo!"
- filename = "${path.module}/foo.bar"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `content` - (Required) The content of file to create.
-
-* `filename` - (Required) The path of the file to create.
-
-Any required parent directories will be created automatically, and any existing
-file with the given name will be overwritten.
diff --git a/website/source/docs/providers/logentries/index.html.markdown b/website/source/docs/providers/logentries/index.html.markdown
deleted file mode 100644
index 31d9f57f6..000000000
--- a/website/source/docs/providers/logentries/index.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "logentries"
-page_title: "Provider: Logentries"
-sidebar_current: "docs-logentries-index"
-description: |-
- The Logentries provider is used to manage Logentries logs and log sets. Logentries provides live log management and analytics. The provider needs to be configured with a Logentries account key before it can be used.
----
-
-# Logentries Provider
-
-The Logentries provider is used to manage Logentries logs and log sets. Logentries provides live log management and analytics. The provider needs to be configured with a Logentries account key before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Logentries provider
-provider "logentries" {
- account_key = "${var.logentries_account_key}"
-}
-
-# Create a log set
-resource "logentries_logset" "host_logs" {
- name = "${var.server}-logs"
-}
-
-# Create a log and add it to the log set
-resource "logentries_log" "app_log" {
- logset_id = "${logentries_logset.host_logs.id}"
- name = "myapp-log"
- source = "token"
-}
-
-# Add the log token to a cloud-config that can be used by an
-# application to send logs to Logentries
-resource "aws_launch_configuration" "app_launch_config" {
- name_prefix = "myapp-"
- image_id = "${var.ami}"
- instance_type = "${var.instance_type}"
-
- user_data = < **Caution:** The ``mysql_database`` resource can completely delete your
-database just as easily as it can create it. To avoid costly accidents,
-consider setting
-[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
-on your database resources as an extra safety measure.
-
-## Example Usage
-
-```hcl
-resource "mysql_database" "app" {
- name = "my_awesome_app"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the database. This must be unique within
- a given MySQL server and may or may not be case-sensitive depending on
- the operating system on which the MySQL server is running.
-
-* `default_character_set` - (Optional) The default character set to use when
- a table is created without specifying an explicit character set. Defaults
- to "utf8".
-
-* `default_collation` - (Optional) The default collation to use when a table
- is created without specifying an explicit collation. Defaults to
- ``utf8_general_ci``. Each character set has its own set of collations, so
- changing the character set requires also changing the collation.
-
-Note that the defaults for character set and collation above do not respect
-any defaults set on the MySQL server, so that the configuration can be set
-appropriately even though Terraform cannot see the server-level defaults. If
-you wish to use the server's defaults you must consult the server's
-configuration and then set the ``default_character_set`` and
-``default_collation`` to match.
-
-## Attributes Reference
-
-No further attributes are exported.
diff --git a/website/source/docs/providers/mysql/r/grant.html.markdown b/website/source/docs/providers/mysql/r/grant.html.markdown
deleted file mode 100644
index 464445836..000000000
--- a/website/source/docs/providers/mysql/r/grant.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "mysql"
-page_title: "MySQL: mysql_grant"
-sidebar_current: "docs-mysql-resource-grant"
-description: |-
- Creates and manages privileges given to a user on a MySQL server
----
-
-# mysql\_grant
-
-The ``mysql_grant`` resource creates and manages privileges given to
-a user on a MySQL server.
-
-## Example Usage
-
-```hcl
-resource "mysql_user" "jdoe" {
- user = "jdoe"
- host = "example.com"
- password = "password"
-}
-
-resource "mysql_grant" "jdoe" {
- user = "${mysql_user.jdoe.user}"
- host = "${mysql_user.jdoe.host}"
- database = "app"
- privileges = ["SELECT", "UPDATE"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `user` - (Required) The name of the user.
-
-* `host` - (Optional) The source host of the user. Defaults to "localhost".
-
-* `database` - (Required) The database to grant privileges on. At this time,
- privileges are given to all tables on the database (`mydb.*`).
-
-* `privileges` - (Required) A list of privileges to grant to the user. Refer
- to a list of privileges (such as
- [here](https://dev.mysql.com/doc/refman/5.5/en/grant.html)) for applicable
- privileges.
-
-* `grant` - (Optional) Whether to also give the user privileges to grant
- the same privileges to other users.
-
-## Attributes Reference
-
-No further attributes are exported.
diff --git a/website/source/docs/providers/mysql/r/user.html.markdown b/website/source/docs/providers/mysql/r/user.html.markdown
deleted file mode 100644
index 6f3f8b50d..000000000
--- a/website/source/docs/providers/mysql/r/user.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "mysql"
-page_title: "MySQL: mysql_user"
-sidebar_current: "docs-mysql-resource-user"
-description: |-
- Creates and manages a user on a MySQL server.
----
-
-# mysql\_user
-
-The ``mysql_user`` resource creates and manages a user on a MySQL
-server.
-
-~> **Note:** All arguments including username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "mysql_user" "jdoe" {
- user = "jdoe"
- host = "example.com"
- password = "password"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `user` - (Required) The name of the user.
-
-* `host` - (Optional) The source host of the user. Defaults to "localhost".
-
-* `password` - (Optional) The password of the user. The value of this
- argument is plain-text so make sure to secure where this is defined.
-
-## Attributes Reference
-
-No further attributes are exported.
diff --git a/website/source/docs/providers/newrelic/d/application.html.markdown b/website/source/docs/providers/newrelic/d/application.html.markdown
deleted file mode 100644
index 8d0f4a209..000000000
--- a/website/source/docs/providers/newrelic/d/application.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "newrelic"
-page_title: "New Relic: newrelic_application"
-sidebar_current: "docs-newrelic-datasource-application"
-description: |-
- Looks up the information about an application in New Relic.
----
-
-# newrelic\_application
-
-Use this data source to get information about a specific application in New Relic.
-
-## Example Usage
-
-```hcl
-data "newrelic_application" "app" {
- name = "my-app"
-}
-
-resource "newrelic_alert_policy" "foo" {
- name = "foo"
-}
-
-resource "newrelic_alert_condition" "foo" {
- policy_id = "${newrelic_alert_policy.foo.id}"
-
- name = "foo"
- type = "apm_app_metric"
- entities = ["${data.newrelic_application.app.id}"]
- metric = "apdex"
- runbook_url = "https://www.example.com"
-
- term {
- duration = 5
- operator = "below"
- priority = "critical"
- threshold = "0.75"
- time_function = "all"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the application in New Relic.
-
-## Attributes Reference
-* `id` - The ID of the application.
-* `instance_ids` - A list of instance IDs associated with the application.
-* `host_ids` - A list of host IDs associated with the application.
diff --git a/website/source/docs/providers/newrelic/index.html.markdown b/website/source/docs/providers/newrelic/index.html.markdown
deleted file mode 100644
index ad1e4a3fc..000000000
--- a/website/source/docs/providers/newrelic/index.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "newrelic"
-page_title: "Provider: New Relic"
-sidebar_current: "docs-newrelic-index"
-description: |-
- New Relic offers a performance management solution enabling developers to
- diagnose and fix application performance problems in real time.
----
-
-# New Relic Provider
-
-[New Relic](https://newrelic.com/) offers a performance management solution
-enabling developers to diagnose and fix application performance problems in real time.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the New Relic provider
-provider "newrelic" {
- api_key = "${var.newrelic_api_key}"
-}
-
-# Create an alert policy
-resource "newrelic_alert_policy" "alert" {
- name = "Alert"
-}
-
-# Add a condition
-resource "newrelic_alert_condition" "foo" {
- policy_id = "${newrelic_alert_policy.alert.id}"
-
- name = "foo"
- type = "apm_app_metric"
- entities = ["12345"] # You can look this up in New Relic
- metric = "apdex"
- runbook_url = "https://docs.example.com/my-runbook"
-
- term {
- duration = 5
- operator = "below"
- priority = "critical"
- threshold = "0.75"
- time_function = "all"
- }
-}
-
-# Add a notification channel
-resource "newrelic_alert_channel" "email" {
- name = "email"
- type = "email"
-
- configuration = {
- recipients = "paul@example.com"
- include_json_attachment = "1"
- }
-}
-
-# Link the channel to the policy
-resource "newrelic_alert_policy_channel" "alert_email" {
- policy_id = "${newrelic_alert_policy.alert.id}"
- channel_id = "${newrelic_alert_channel.email.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_key` - (Required) Your New Relic API key.
diff --git a/website/source/docs/providers/newrelic/r/alert_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_channel.html.markdown
deleted file mode 100644
index eb533a778..000000000
--- a/website/source/docs/providers/newrelic/r/alert_channel.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "newrelic"
-page_title: "New Relic: newrelic_alert_channel"
-sidebar_current: "docs-newrelic-resource-alert-channel"
-description: |-
- Create and manage a notification channel for alerts in New Relic.
----
-
-# newrelic\_alert\_channel
-
-## Example Usage
-
-```hcl
-resource "newrelic_alert_channel" "foo" {
- name = "foo"
- type = "email"
-
- configuration = {
- recipients = "foo@example.com"
- include_json_attachment = "1"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the channel.
- * `type` - (Required) The type of channel. One of: `campfire`, `email`, `hipchat`, `opsgenie`, `pagerduty`, `slack`, `victorops`, or `webhook`.
- * `configuration` - (Required) A map of key / value pairs with channel type specific values.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the channel.
-
-## Import
-
-Alert channels can be imported using the `id`, e.g.
-
-```
-$ terraform import newrelic_alert_channel.main 12345
-```
diff --git a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown b/website/source/docs/providers/newrelic/r/alert_condition.html.markdown
deleted file mode 100644
index bbbcb2c27..000000000
--- a/website/source/docs/providers/newrelic/r/alert_condition.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "newrelic"
-page_title: "New Relic: newrelic_alert_condition"
-sidebar_current: "docs-newrelic-resource-alert-condition"
-description: |-
- Create and manage an alert condition for a policy in New Relic.
----
-
-# newrelic\_alert\_condition
-
-## Example Usage
-
-```hcl
-data "newrelic_application" "app" {
- name = "my-app"
-}
-
-resource "newrelic_alert_policy" "foo" {
- name = "foo"
-}
-
-resource "newrelic_alert_condition" "foo" {
- policy_id = "${newrelic_alert_policy.foo.id}"
-
- name = "foo"
- type = "apm_app_metric"
- entities = ["${data.newrelic_application.app.id}"]
- metric = "apdex"
- runbook_url = "https://www.example.com"
-
- term {
- duration = 5
- operator = "below"
- priority = "critical"
- threshold = "0.75"
- time_function = "all"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `policy_id` - (Required) The ID of the policy where this condition should be used.
- * `name` - (Required) The title of the condition
- * `type` - (Required) The type of condition. One of: `apm_app_metric`, `apm_kt_metric`, `servers_metric`, `browser_metric`, `mobile_metric`
- * `entities` - (Required) The instance IDS associated with this condition.
- * `metric` - (Required) The metric field accepts parameters based on the `type` set.
- * `runbook_url` - (Optional) Runbook URL to display in notifications.
- * `condition_scope` - (Optional) `instance` or `application`. This is required if you are using the JVM plugin in New Relic.
- * `term` - (Required) A list of terms for this condition. See [Terms](#terms) below for details.
- * `user_defined_metric` - (Optional) A custom metric to be evaluated.
- * `user_defined_value_function` - (Optional) One of: `average`, `min`, `max`, `total`, or `sample_size`.
-
-## Terms
-
-The `term` mapping supports the following arguments:
-
- * `duration` - (Required) In minutes, must be: `5`, `10`, `15`, `30`, `60`, or `120`.
- * `operator` - (Optional) `above`, `below`, or `equal`. Defaults to `equal`.
- * `priority` - (Optional) `critical` or `warning`. Defaults to `critical`.
- * `threshold` - (Required) Must be 0 or greater.
- * `time_function` - (Required) `all` or `any`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the alert condition.
-
-## Import
-
-Alert conditions can be imported using the `id`, e.g.
-
-```
-$ terraform import newrelic_alert_condition.main 12345
-```
diff --git a/website/source/docs/providers/newrelic/r/alert_policy.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy.html.markdown
deleted file mode 100644
index fddb20461..000000000
--- a/website/source/docs/providers/newrelic/r/alert_policy.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "newrelic"
-page_title: "New Relic: newrelic_alert_policy"
-sidebar_current: "docs-newrelic-resource-alert-policy"
-description: |-
- Create and manage alert policies in New Relic.
----
-
-# newrelic\_alert\_policy
-
-## Example Usage
-
-```hcl
-resource "newrelic_alert_policy" "foo" {
- name = "foo"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the policy.
- * `incident_preference` - (Optional) The rollup strategy for the policy. Options include: `PER_POLICY`, `PER_CONDITION`, or `PER_CONDITION_AND_TARGET`. The default is `PER_POLICY`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the policy.
- * `created_at` - The time the policy was created.
- * `updated_at` - The time the policy was last updated.
-
-## Import
-
-Alert policies can be imported using the `id`, e.g.
-
-```
-$ terraform import newrelic_alert_policy.main 12345
-```
diff --git a/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown b/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown
deleted file mode 100644
index 310d14876..000000000
--- a/website/source/docs/providers/newrelic/r/alert_policy_channel.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "newrelic"
-page_title: "New Relic: newrelic_alert_policy_channel"
-sidebar_current: "docs-newrelic-resource-alert-policy-channel"
-description: |-
- Map alert policies to alert channels in New Relic.
----
-
-# newrelic\_alert\_policy\_channel
-
-## Example Usage
-
-```hcl
-resource "newrelic_alert_policy" "foo" {
- name = "foo"
-}
-
-resource "newrelic_alert_channel" "foo" {
- name = "foo"
- type = "email"
-
- configuration = {
- recipients = "foo@example.com"
- include_json_attachment = "1"
- }
-}
-
-resource "newrelic_alert_policy_channel" "foo" {
- policy_id = "${newrelic_alert_policy.foo.id}"
- channel_id = "${newrelic_alert_channel.foo.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `policy_id` - (Required) The ID of the policy.
- * `channel_id` - (Required) The ID of the channel.
diff --git a/website/source/docs/providers/nomad/index.html.markdown b/website/source/docs/providers/nomad/index.html.markdown
deleted file mode 100644
index 129a9540b..000000000
--- a/website/source/docs/providers/nomad/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "nomad"
-page_title: "Provider: Nomad"
-sidebar_current: "docs-nomad-index"
-description: |-
- Nomad is a cluster scheduler. The Nomad provider exposes resources to interact with a Nomad cluster.
----
-
-# Nomad Provider
-
-[Nomad](https://www.nomadproject.io) is a cluster scheduler. The Nomad
-provider exposes resources to interact with a Nomad cluster.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Nomad provider
-provider "nomad" {
- address = "nomad.mycompany.com"
- region = "us-east-2"
-}
-
-# Register a job
-resource "nomad_job" "monitoring" {
- jobspec = "${file("${path.module}/jobspec.hcl")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Optional) The HTTP(S) API address of the Nomad agent to use. Defaults to `http://127.0.0.1:4646`. The `NOMAD_ADDR` environment variable can also be used.
-* `region` - (Optional) The Nomad region to target. The `NOMAD_REGION` environment variable can also be used.
-* `ca_file` - (Optional) A path to a PEM-encoded certificate authority used to verify the remote agent's certificate. The `NOMAD_CACERT` environment variable can also be used.
-* `cert_file` - (Optional) A path to a PEM-encoded certificate provided to the remote agent; requires use of `key_file`. The `NOMAD_CLIENT_CERT` environment variable can also be used.
-* `key_file`- (Optional) A path to a PEM-encoded private key, required if `cert_file` is specified. The `NOMAD_CLIENT_KEY` environment variable can also be used.
diff --git a/website/source/docs/providers/nomad/r/job.html.markdown b/website/source/docs/providers/nomad/r/job.html.markdown
deleted file mode 100644
index 317473118..000000000
--- a/website/source/docs/providers/nomad/r/job.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "nomad"
-page_title: "Nomad: nomad_job"
-sidebar_current: "docs-nomad-resource-job"
-description: |-
- Manages a job registered in Nomad.
----
-
-# nomad_job
-
-Manages a job registered in Nomad.
-
-This can be used to initialize your cluster with system jobs, common services,
-and more. In day to day Nomad use it is common for developers to submit
-jobs to Nomad directly, such as for general app deployment. In addition to
-these apps, a Nomad cluster often runs core system services that are ideally
-setup during infrastructure creation. This resource is ideal for the latter
-type of job, but can be used to manage any job within Nomad.
-
-## Example Usage
-
-Registering a job from a jobspec file:
-
-```hcl
-resource "nomad_job" "app" {
- jobspec = "${file("${path.module}/job.hcl")}"
-}
-```
-
-Registering a job from an inline jobspec. This is less realistic but
-is an example of how it is possible. More likely, the contents will
-be paired with something such as the
-[template_file](https://www.terraform.io/docs/providers/template/d/file.html)
-resource to render parameterized jobspecs.
-
-```hcl
-resource "nomad_job" "app" {
- jobspec = < **Caution:** The ``opc_compute_instance`` resource can completely delete your
-instance just as easily as it can create it. To avoid costly accidents,
-consider setting
-[``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy)
-on your instance resources as an extra safety measure.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_network" "test" {
- name = "internal-network"
- description = "Terraform Provisioned Internal Network"
- ip_address_prefix = "10.0.1.0/24"
- public_napt_enabled = false
-}
-
-resource "opc_compute_storage_volume" "test" {
- name = "internal"
- size = 100
-}
-
-resource "opc_compute_instance" "test" {
- name = "instance1"
- label = "Terraform Provisioned Instance"
- shape = "oc3"
- image_list = "/oracle/public/oel_6.7_apaas_16.4.5_1610211300"
-
- storage {
- volume = "${opc_compute_storage_volume.test.name}"
- index = 1
- }
-
- networking_info {
- index = 0
- nat = ["ippool:/oracle/public/ippool"]
- shared_network = true
- }
-}
-
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the instance.
-
-* `shape` - (Required) The shape of the instance, e.g. `oc4`.
-
-* `instance_attributes` - (Optional) A JSON string of custom attributes. See [Attributes](#attributes) below for more information.
-
-* `boot_order` - (Optional) The index number of the bootable storage volume, presented as a list, that should be used to boot the instance. The only valid value is `[1]`. If you set this attribute, you must also specify a bootable storage volume with index number 1 in the volume sub-parameter of storage_attachments. When you specify boot_order, you don't need to specify the imagelist attribute, because the instance is booted using the image on the specified bootable storage volume. If you specify both boot_order and imagelist, the imagelist attribute is ignored.
-
-* `hostname` - (Optional) The host name assigned to the instance. On an Oracle Linux instance, this host name is displayed in response to the hostname command. Only relative DNS is supported. The domain name is suffixed to the host name that you specify. The host name must not end with a period. If you don't specify a host name, then a name is generated automatically.
-
-* `image_list` - (Optional) The imageList of the instance, e.g. `/oracle/public/oel_6.4_2GB_v1`.
-
-* `label` - (Optional) The label to apply to the instance.
-
-* `networking_info` - (Optional) Information pertaining to an individual network interface to be created and attached to the instance. See [Networking Info](#networking-info) below for more information.
-
-* `storage` - (Optional) Information pertaining to an individual storage attachment to be created during instance creation. Please see [Storage Attachments](#storage-attachments) below for more information.
-
-* `reverse_dns` - (Optional) If set to `true` (default), then reverse DNS records are created. If set to `false`, no reverse DNS records are created.
-
-* `ssh_keys` - (Optional) A list of the names of the SSH Keys that can be used to log into the instance.
-
-* `tags` - (Optional) A list of strings that should be supplied to the instance as tags.
-
-## Attributes
-
-During instance creation, there are several custom attributes that a user may wish to make available to the instance during instance creation.
-These attributes can be specified via the `instance_attributes` field, and must be presented as a string in JSON format.
-The easiest way to populate this field is with a HEREDOC:
-
-```hcl
-resource "opc_compute_instance" "foo" {
- name = "test"
- label = "test"
- shape = "oc3"
- imageList = "/oracle/public/oel_6.4_2GB_v1"
- instance_attributes = </@//
-```
-
-The instance can be imported as such:
-
-```shell
-$ terraform import opc_compute_instance.instance1 instance_name/instance_id
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown
deleted file mode 100644
index cffabad6a..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_address_association.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_address_association"
-sidebar_current: "docs-opc-resource-ip-address-association"
-description: |-
- Creates and manages an IP address association in an OPC identity domain, for an IP Network.
----
-
-# opc\_compute\_ip\_address\_association
-
-The ``opc_compute_ip_address_association`` resource creates and manages an IP address association between an IP address reservation and a virtual NIC in an OPC identity domain, for an IP Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_address_association" "default" {
- name = "PrefixSet1"
- ip_address_reservation = "${opc_compute_ip_address_reservation.default.name}"
- vnic = "${data.opc_compute_vnic.default.name}"
- tags = ["tags1", "tags2"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ip address association.
-
-* `ip_address_reservation` - (Optional) The name of the NAT IP address reservation.
-
-* `vnic` - (Optional) The name of the virtual NIC associated with this NAT IP reservation.
-
-* `description` - (Optional) A description of the ip address association.
-
-* `tags` - (Optional) List of tags that may be applied to the ip address association.
-
-In addition to the above, the following variables are exported:
-
-* `uri` - (Computed) The Uniform Resource Identifier of the ip address association.
-
-## Import
-
-IP Address Associations can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_address_association.default example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown
deleted file mode 100644
index fa63f0794..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_address_prefix_set.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_address_prefix_set"
-sidebar_current: "docs-opc-resource-ip-address-prefix-set"
-description: |-
- Creates and manages an IP address prefix set in an OPC identity domain.
----
-
-# opc\_compute\_ip\_address\_prefix\_set
-
-The ``opc_compute_ip_address_prefix_set`` resource creates and manages an IP address prefix set in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_address_prefix_set" "default" {
- name = "PrefixSet1"
- prefixes = ["192.168.0.0/16", "172.120.0.0/24"]
- tags = ["tags1", "tags2"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ip address prefix set.
-
-* `prefixes` - (Optional) List of CIDR IPv4 prefixes assigned in the virtual network.
-
-* `description` - (Optional) A description of the ip address prefix set.
-
-* `tags` - (Optional) List of tags that may be applied to the ip address prefix set.
-
-In addition to the above, the following variables are exported:
-
-* `uri` - (Computed) The Uniform Resource Identifier of the ip address prefix set.
-
-## Import
-
-IP Address Prefix Set can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_address_prefix_set.default example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
deleted file mode 100644
index d73a67e1a..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_address_reservation.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_address_reservation"
-sidebar_current: "docs-opc-resource-ip-address-reservation"
-description: |-
- Creates and manages an IP address reservation in an OPC identity domain for an IP Network.
----
-
-# opc\_compute\_ip\_address\_reservation
-
-The ``opc_compute_ip_address_reservation`` resource creates and manages an IP address reservation in an OPC identity domain, for an IP Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_address_reservation" "default" {
- name = "IPAddressReservation1"
- ip_address_pool = "public-ippool"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ip address reservation.
-
-* `ip_address_pool` - (Required) The IP address pool from which you want to reserve an IP address. Must be either `public-ippool` or `cloud-ippool`.
-
-* `description` - (Optional) A description of the ip address reservation.
-
-* `tags` - (Optional) List of tags that may be applied to the IP address reservation.
-
-In addition to the above, the following attributes are exported:
-
-* `ip_address` - Reserved NAT IPv4 address from the IP address pool.
-
-* `uri` - The Uniform Resource Identifier of the ip address reservation
-
-## Import
-
-IP Address Reservations can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_address_reservation.default example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
deleted file mode 100644
index 8ed8334c6..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_association.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_association"
-sidebar_current: "docs-opc-resource-ip-association"
-description: |-
- Creates and manages an IP association in an OPC identity domain for the Shared Network.
----
-
-# opc\_compute\_ip\_association
-
-The ``opc_compute_ip_association`` resource creates and manages an association between an IP address and an instance in
-an OPC identity domain, for the Shared Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_association" "instance1_reservation1" {
- vcable = "${opc_compute_instance.test_instance.vcable}"
- parentpool = "ipreservation:${opc_compute_ip_reservation.reservation1.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vcable` - (Required) The vcable of the instance to associate the IP address with.
-
-* `parentpool` - (Required) The pool from which to take an IP address. To associate a specific reserved IP address, use
-the prefix `ipreservation:` followed by the name of the IP reservation. To allocate an IP address from a pool, use the
-prefix `ippool:`, e.g. `ippool:/oracle/public/ippool`.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` The name of the IP Association
-
-## Import
-
-IP Associations can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_association.association1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown
deleted file mode 100644
index 5b843219d..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_network.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_network"
-sidebar_current: "docs-opc-resource-ip-network"
-description: |-
- Creates and manages an IP Network
----
-
-# opc\_compute\_ip_network
-
-The ``opc_compute_ip_network`` resource creates and manages an IP Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_network" "foo" {
- name = "my-ip-network"
- description = "my IP Network"
- ip_address_prefix = "10.0.1.0/24"
- ip_network_exchange = "${opc_compute_ip_exchange.foo.name}"
- public_napt_enabled = false
- tags = ["tag1", "tag2"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the IP Network.
-
-* `ip_address_prefix` - (Required) The IPv4 address prefix, in CIDR format.
-
-* `description` - (Optional) The description of the IP Network.
-
-* `ip_network_exchange` - (Optional) Specify the IP Network exchange to which the IP Network belongs to.
-
-* `public_napt_enabled` - (Optional) If true, enable public internet access using NAPT for VNICs without any public IP Reservation. Defaults to `false`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the IP Network
-
-* `ip_address_prefix` - The IPv4 address prefix, in CIDR format.
-
-* `description` - The description of the IP Network.
-
-* `ip_network_exchange` - The IP Network Exchange for the IP Network
-
-* `public_napt_enabled` - Whether public internet access using NAPT is enabled for VNICs without any public IP Reservation.
-
-* `uri` - Uniform Resource Identifier for the IP Network
-
-## Import
-
-IP Networks can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_network.default example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown
deleted file mode 100644
index e0ab547b1..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_network_exchange.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_network_exchange"
-sidebar_current: "docs-opc-resource-ip-network-exchange"
-description: |-
- Creates and manages an IP network exchange in an OPC identity domain.
----
-
-# opc\_compute\_ip\_network\_exchange
-
-The ``opc_compute_ip_network_exchange`` resource creates and manages an IP network exchange in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_network_exchange" "default" {
- name = "NetworkExchange1"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the ip network exchange.
-
-* `description` - (Optional) A description of the ip network exchange.
-
-* `tags` - (Optional) List of tags that may be applied to the IP network exchange.
-
-## Import
-
-IP Network Exchange's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_network_exchange.exchange1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
deleted file mode 100644
index dac6d208f..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ip_reservation.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ip_reservation"
-sidebar_current: "docs-opc-resource-ip-reservation"
-description: |-
- Creates and manages an IP reservation in an OPC identity domain for the Shared Network.
----
-
-# opc\_compute\_ip\_reservation
-
-The ``opc_compute_ip_reservation`` resource creates and manages an IP reservation in an OPC identity domain for the Shared Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ip_reservation" "reservation1" {
- parent_pool = "/oracle/public/ippool"
- permanent = true
- tags = [ "test" ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `permanent` - (Required) Whether the IP address remains reserved even when it is no longer associated with an instance
-(if true), or may be returned to the pool and replaced with a different IP address when an instance is restarted, or
-deleted and recreated (if false).
-
-* `parent_pool` - (Optional) The pool from which to allocate the IP address. Defaults to `/oracle/public/ippool`, and is currently the only acceptable input.
-
-* `name` - (Optional) Name of the IP Reservation. Will be generated if unspecified.
-
-* `tags` - (Optional) List of tags that may be applied to the IP reservation.
-
-## Import
-
-IP Reservations can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ip_reservation.reservation1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_route.html.markdown b/website/source/docs/providers/opc/r/opc_compute_route.html.markdown
deleted file mode 100644
index ccb3d0dac..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_route.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_route"
-sidebar_current: "docs-opc-resource-route"
-description: |-
- Creates and manages a Route resource for an IP Network
----
-
-# opc\_compute\_route
-
-The ``opc_compute_route`` resource creates and manages a route for an IP Network.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_route" "foo" {
- name = "my-route"
- description = "my IP Network route"
- admin_distance = 1
- ip_address_prefix = "10.0.1.0/24"
- next_hop_vnic_set = "${opc_compute_vnic_set.bar.name}"
- tags = ["tag1", "tag2"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the route.
-
-* `description` - (Optional) The description of the route.
-
-* `admin_distance` - (Optional) The route's administrative distance. Defaults to `0`.
-
-* `ip_address_prefix` - (Required) The IPv4 address prefix, in CIDR format, of the external network from which to route traffic.
-
-* `next_hop_vnic_set` - (Required) Name of the virtual NIC set to route matching packets to. Routed flows are load-balanced among all the virtual NICs in the virtual NIC set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The name of the route
-
-* `description` - The description of the route.
-
-* `admin_distance` - The route's administrative distance. Defaults to `0`.
-
-* `ip_address_prefix` - The IPv4 address prefix, in CIDR format, of the external network from which to route traffic.
-
-* `next_hop_vnic_set` - Name of the virtual NIC set to route matching packets to. Routed flows are load-balanced among all the virtual NICs in the virtual NIC set.
-
-## Import
-
-Route's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_route.route1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown
deleted file mode 100644
index 96a46b919..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_sec_rule.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_sec_rule"
-sidebar_current: "docs-opc-resource-sec-rule"
-description: |-
- Creates and manages a sec rule in an OPC identity domain.
----
-
-# opc\_compute\_sec\_rule
-
-The ``opc_compute_sec_rule`` resource creates and manages a sec rule in an OPC identity domain, which joins together a source security list (or security IP list), a destination security list (or security IP list), and a security application.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_sec_rule" "test_rule" {
- name = "test"
- source_list = "seclist:${opc_compute_security_list.sec-list1.name}"
- destination_list = "seciplist:${opc_compute_security_ip_list.sec-ip-list1.name}"
- action = "permit"
- application = "${opc_compute_security_application.spring-boot.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within the identity domain) name of the security rule.
-
-* `description` - (Optional) A description for this security rule.
-
-* `source_list` - (Required) The source security list (prefixed with `seclist:`), or security IP list (prefixed with
-`seciplist:`).
-
-* `destination_list` - (Required) The destination security list (prefixed with `seclist:`), or security IP list (prefixed with
-`seciplist:`).
-
-* `application` - (Required) The name of the application to which the rule applies.
-
-* `action` - (Required) Whether to `permit`, `refuse` or `deny` packets to which this rule applies. This will ordinarily
-be `permit`.
-
-* `disabled` - (Optional) Whether to disable this security rule. This is useful if you want to temporarily disable a rule
-without removing it outright from your Terraform resource definition. Defaults to `false`.
-
-In addition to the above, the following values are exported:
-
-* `uri` - The Uniform Resource Identifier of the sec rule.
-
-## Import
-
-Sec Rule's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_sec_rule.rule1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown
deleted file mode 100644
index 2bf7fec5e..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_application.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_application"
-sidebar_current: "docs-opc-resource-security-application"
-description: |-
- Creates and manages a security application in an OPC identity domain.
----
-
-# opc\_compute\_security\_application
-
-The ``opc_compute_security_application`` resource creates and manages a security application in an OPC identity domain.
-
-## Example Usage (TCP)
-
-```hcl
-resource "opc_compute_security_application" "tomcat" {
- name = "tomcat"
- protocol = "tcp"
- dport = "8080"
-}
-```
-
-## Example Usage (ICMP)
-
-```hcl
-resource "opc_compute_security_application" "tomcat" {
- name = "tomcat"
- protocol = "icmp"
- icmptype = "echo"
- icmpcode = "protocol"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within the identity domain) name of the application
-
-* `protocol` - (Required) The protocol to enable for this application. Must be one of
-`tcp`, `udp`, `ah`, `esp`, `icmp`, `icmpv6`, `igmp`, `ipip`, `gre`, `mplsip`, `ospf`, `pim`, `rdp`, `sctp` or `all`.
-
-* `dport` - (Required) The port, or range of ports, to enable for this application, e.g `8080`, `6000-7000`. This must be set if the `protocol` is set to `tcp` or `udp`.
-
-* `icmptype` - (Optional) The ICMP type to enable for this application, if the `protocol` is `icmp`. Must be one of
-`echo`, `reply`, `ttl`, `traceroute`, `unreachable`.
-
-* `icmpcode` - (Optional) The ICMP code to enable for this application, if the `protocol` is `icmp`. Must be one of
-`admin`, `df`, `host`, `network`, `port` or `protocol`.
-
-## Import
-
-Security Application's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_application.application1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown
deleted file mode 100644
index 2d711d589..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_association.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_association"
-sidebar_current: "docs-opc-resource-security-association"
-description: |-
- Creates and manages a security association in an OPC identity domain.
----
-
-# opc\_compute\_security\_association
-
-The ``opc_compute_security_association`` resource creates and manages an association between an instance and a security
-list in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_security_association" "test_instance_sec_list_1" {
- name = "association1"
- vcable = "${opc_compute_instance.test_instance.vcable}"
- seclist = "${opc_compute_security_list.sec_list1.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The Name for the Security Association. If not specified, one is created automatically. Changing this forces a new resource to be created.
-
-* `vcable` - (Required) The `vcable` of the instance to associate to the security list.
-
-* `seclist` - (Required) The name of the security list to associate the instance to.
-
-## Import
-
-Security Association's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_association.association1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown
deleted file mode 100644
index 503c93efe..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_ip_list.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_ip_list"
-sidebar_current: "docs-opc-resource-security-list"
-description: |-
- Creates and manages a security IP list in an OPC identity domain.
----
-
-# opc\_compute\_security\_ip\_list
-
-The ``opc_compute_security_ip_list`` resource creates and manages a security IP list in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_security_ip_list" "sec_ip_list1" {
- name = "sec-ip-list1"
- ip_entries = ["217.138.34.4"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within the identity domain) name of the security IP list.
-
-* `ip_entries` - (Required) The IP addresses to include in the list.
-
-* `description` - (Optional) The description of the security ip list.
-
-## Import
-
-IP List's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_ip_list.list1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown
deleted file mode 100644
index 461e6603e..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_list.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_list"
-sidebar_current: "docs-opc-resource-security-list"
-description: |-
- Creates and manages a security list in an OPC identity domain.
----
-
-# opc\_compute\_security\_list
-
-The ``opc_compute_security_list`` resource creates and manages a security list in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_security_list" "sec_list1" {
- name = "sec-list-1"
- policy = "permit"
- outbound_cidr_policy = "deny"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within the identity domain) name of the security list.
-
-* `policy` - (Required) The policy to apply to instances associated with this list. Must be one of `permit`,
-`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
-
-* `outbound_cidr_policy` - (Required) The policy for outbound traffic from the security list. Must be one of `permit`,
-`reject` (packets are dropped but a reply is sent) and `deny` (packets are dropped and no reply is sent).
-
-## Import
-
-Security List's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_list.list1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown
deleted file mode 100644
index 00208321a..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_protocol.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_protocol"
-sidebar_current: "docs-opc-resource-security-protocol"
-description: |-
- Creates and manages a security protocol in an OPC identity domain.
----
-
-# opc\_compute\_security\_protocol
-
-The ``opc_compute_security_protocol`` resource creates and manages a security protocol in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_security_protocol" "default" {
- name = "security-protocol-1"
- dst_ports = ["2045-2050"]
- src_ports = ["3045-3060"]
- ip_protocol = "tcp"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the security protocol.
-
-* `dst_ports` (Optional) Enter a list of port numbers or port range strings.
- Traffic is enabled by a security rule when a packet's destination port matches the
- ports specified here.
- For TCP, SCTP, and UDP, each port is a destination transport port, between 0 and 65535,
- inclusive. For ICMP, each port is an ICMP type, between 0 and 255, inclusive.
- If no destination ports are specified, all destination ports or ICMP types are allowed.
-
-* `src_ports` (Optional) Enter a list of port numbers or port range strings.
- Traffic is enabled by a security rule when a packet's source port matches the
- ports specified here.
- For TCP, SCTP, and UDP, each port is a source transport port,
- between 0 and 65535, inclusive.
- For ICMP, each port is an ICMP type, between 0 and 255, inclusive.
- If no source ports are specified, all source ports or ICMP types are allowed.
-
-* `ip_protocol` (Optional) The protocol used in the data portion of the IP datagram.
- Permitted values are: tcp, udp, icmp, igmp, ipip, rdp, esp, ah, gre, icmpv6, ospf, pim, sctp,
- mplsip, all.
- Traffic is enabled by a security rule when the protocol in the packet matches the
- protocol specified here. If no protocol is specified, all protocols are allowed.
-
-* `description` - (Optional) A description of the security protocol.
-
-* `tags` - (Optional) List of tags that may be applied to the security protocol.
-
-In addition to the above, the following values are exported:
-
-* `uri` - The Uniform Resource Identifier for the Security Protocol
-
-## Import
-
-Security Protocol's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_protocol.default example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown b/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown
deleted file mode 100644
index 0a44150e7..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_security_rule.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_security_rule"
-sidebar_current: "docs-opc-resource-security-rule"
-description: |-
- Creates and manages a security rule in an OPC identity domain.
----
-
-# opc\_compute\_security\_rule
-
-The ``opc_compute_security_rule`` resource creates and manages a security rule in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_security_rule" "default" {
- name = "SecurityRule1"
- flow_direction = "ingress"
- acl = "${opc_compute_acl.default.name}"
- security_protocols = ["${opc_compute_security_protocol.default.name}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the security rule.
-
-* `flow_direction` - (Required) Specify the direction of flow of traffic, which is relative to the instances, for this security rule. Allowed values are ingress or egress.
-
-* `disabled` - (Optional) Whether to disable this security rule. This is useful if you want to temporarily disable a rule without removing it outright from your Terraform resource definition. Defaults to `false`.
-
-* `acl` - (Optional) Name of the ACL that contains this security rule.
-
-* `dst_ip_address_prefixes` - (Optional) List of IP address prefix set names to match the packet's destination IP address.
-
-* `src_ip_address_prefixes` - (Optional) List of names of IP address prefix set to match the packet's source IP address.
-
-* `dst_vnic_set` - (Optional) Name of virtual NIC set containing the packet's destination virtual NIC.
-
-* `src_vnic_set` - (Optional) Name of virtual NIC set containing the packet's source virtual NIC.
-
-* `security_protocols` - (Optional) List of security protocol object names to match the packet's protocol and port.
-
-* `description` - (Optional) A description of the security rule.
-
-* `tags` - (Optional) List of tags that may be applied to the security rule.
-
-## Attributes Reference
-
-In addition to the above, the following attributes are exported:
-
-* `uri` - The Uniform Resource Identifier of the security rule.
-
-## Import
-
-Security Rule's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_security_rule.rule1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown b/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown
deleted file mode 100644
index fc62f0664..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_ssh_key.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_ssh_key"
-sidebar_current: "docs-opc-resource-ssh-key"
-description: |-
- Creates and manages an SSH key in an OPC identity domain.
----
-
-# opc\_compute\_ssh_key
-
-The ``opc_compute_ssh_key`` resource creates and manages an SSH key in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_ssh_key" "test" {
- name = "test-key"
- key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCqw6JwbjIk..."
- enabled = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within this identity domain) name of the SSH key.
-
-* `key` - (Required) The SSH key itself
-
-* `enabled` - (Optional) Whether or not the key is enabled. This is useful if you want to temporarily disable an SSH key,
-without removing it entirely from your Terraform resource definition. Defaults to `true`
-
-## Import
-
-SSH Key's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_ssh_key.key1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown
deleted file mode 100644
index cfa19a2ad..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_storage_volume.html.markdown
+++ /dev/null
@@ -1,82 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_storage_volume"
-sidebar_current: "docs-opc-resource-storage-volume-type"
-description: |-
- Creates and manages a storage volume in an OPC identity domain.
----
-
-# opc\_compute\_storage\_volume
-
-The ``opc_compute_storage_volume`` resource creates and manages a storage volume in an OPC identity domain.
-
-~> **Caution:** The ``opc_compute_storage_volume`` resource can completely delete your storage volume just as easily as it can create it. To avoid costly accidents, consider setting [``prevent_destroy``](/docs/configuration/resources.html#prevent_destroy) on your storage volume resources as an extra safety measure.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_storage_volume" "test" {
- name = "storageVolume1"
- description = "Description for the Storage Volume"
- size = 10
- tags = ["bar", "foo"]
-}
-```
-
-## Example Usage (Bootable Volume)
-```hcl
-resource "opc_compute_image_list" "test" {
- name = "imageList1"
- description = "Description for the Image List"
-}
-
-resource "opc_compute_image_list_entry" "test" {
- name = "${opc_compute_image_list.test.name}"
- machine_images = [ "/oracle/public/oel_6.7_apaas_16.4.5_1610211300" ]
- version = 1
-}
-
-resource "opc_compute_storage_volume" "test" {
- name = "storageVolume1"
- description = "Description for the Bootable Storage Volume"
- size = 30
- tags = ["first", "second"]
- bootable = true
- image_list = "${opc_compute_image_list.test.name}"
- image_list_entry = "${opc_compute_image_list_entry.test.version}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` (Required) The name for the Storage Account.
-* `description` (Optional) The description of the storage volume.
-* `size` (Required) The size of this storage volume in GB. The allowed range is from 1 GB to 2 TB (2048 GB).
-* `storage_type` - (Optional) - The Type of Storage to provision. Possible values are `/oracle/public/storage/latency` or `/oracle/public/storage/default`. Defaults to `/oracle/public/storage/default`.
-* `bootable` - (Optional) Is the Volume Bootable? Defaults to `false`.
-* `image_list` - (Optional) Defines an image list. Required if `bootable` is set to `true`, optional if set to `false`.
-* `image_list_entry` - (Optional) Defines an image list entry. Required if `bootable` is set to `true`, optional if set to `false`.
-* `tags` - (Optional) Comma-separated strings that tag the storage volume.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `hypervisor` - The hypervisor that this volume is compatible with.
-* `machine_image` - Name of the Machine Image - available if the volume is a bootable storage volume.
-* `managed` - Is this a Managed Volume?
-* `platform` - The OS platform this volume is compatible with.
-* `readonly` - Can this Volume be attached as readonly?
-* `status` - The current state of the storage volume.
-* `storage_pool` - The storage pool from which this volume is allocated.
-* `uri` - Unique Resource Identifier of the Storage Volume.
-
-## Import
-
-Storage Volume's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_storage_volume.volume1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown b/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown
deleted file mode 100644
index 885739762..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_storage_volume_snapshot.html.markdown
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_storage_volume_snapshot"
-sidebar_current: "docs-opc-resource-storage-volume-snapshot"
-description: |-
- Creates and manages a storage volume snapshot in an OPC identity domain.
----
-
-# opc\_compute\_storage\_volume_snapshot
-
-The ``opc_compute_storage_volume_snapshot`` resource creates and manages a storage volume snapshot in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_storage_volume_snapshot" "test" {
- name = "storageVolume1"
- description = "Description for the Storage Volume"
- tags = ["bar", "foo"]
- collocated = true
- volume_name = "${opc_compute_storage_volume.foo.name}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `volume_name` (Required) The name of the storage volume to create the snapshot from.
-* `description` (Optional) The description of the storage volume snapshot.
-* `name` (Optional) The name of the storage volume snapshot. Will be generated if unspecified.
-* `parent_volume_bootable` (Optional) A string value of whether or not the parent volume is 'bootable' or not. Defaults to `"false"`.
-* `collocated` (Optional) Boolean specifying whether the snapshot is collocated or remote. Defaults to `false`.
-* `tags` - (Optional) Comma-separated strings that tag the storage volume.
-
-## Attributes Reference
-
-In addition to the attributes above, the following attributes are exported:
-
-* `account` - Account to use for snapshots.
-* `machine_image_name` - The name of the machine image that's used in the boot volume from which this snapshot is taken.
-* `size` - The size of the snapshot in GB.
-* `property` - Where the snapshot is stored, whether collocated, or in the Oracle Storage Cloud Service instance.
-* `platform` - The OS platform this snapshot is compatible with
-* `snapshot_timestamp` - Timestamp of the storage snapshot, generated by storage server. The snapshot will contain data written to the original volume before this time.
-* `snapshot_id` - The Oracle ID of the snapshot.
-* `start_timestamp` - Timestamp when the snapshot was started.
-* `status` - Status of the snapshot.
-* `status_detail` - Details about the latest state of the storage volume snapshot.
-* `status_timestamp` - Indicates the time that the current view of the storage volume snapshot was generated.
-* `uri` - Uniform Resource Identifier
-
-## Import
-
-Storage Volume Snapshot's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_storage_volume_snapshot.volume1 example
-```
diff --git a/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown b/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown
deleted file mode 100644
index 191ffa159..000000000
--- a/website/source/docs/providers/opc/r/opc_compute_vnic_set.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "opc"
-page_title: "Oracle: opc_compute_vnic_set"
-sidebar_current: "docs-opc-resource-vnic-set"
-description: |-
- Creates and manages a virtual NIC set in an OPC identity domain
----
-
-# opc\_compute\_vnic\_set
-
-The ``opc_compute_vnic_set`` resource creates and manages a virtual NIC set in an OPC identity domain.
-
-## Example Usage
-
-```hcl
-resource "opc_compute_vnic_set" "test_set" {
- name = "test_vnic_set"
- description = "My vnic set"
- applied_acls = ["acl1", "acl2"]
- virtual_nics = ["nic1", "nic2", "nic3"]
- tags = ["xyzzy", "quux"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The unique (within this identity domain) name of the virtual nic set.
-
-* `description` - (Optional) A description of the virtual nic set.
-
-* `applied_acls` - (Optional) A list of the ACLs to apply to the virtual nics in the set.
-
-* `virtual_nics` - (Optional) List of virtual NICs associated with this virtual NIC set.
-
-* `tags` - (Optional) A list of tags to apply to the storage volume.
-
-## Import
-
-VNIC Set's can be imported using the `resource name`, e.g.
-
-```shell
-$ terraform import opc_compute_vnic_set.set1 example
-```
diff --git a/website/source/docs/providers/openstack/d/images_image_v2.html.markdown b/website/source/docs/providers/openstack/d/images_image_v2.html.markdown
deleted file mode 100644
index 5e9f724c3..000000000
--- a/website/source/docs/providers/openstack/d/images_image_v2.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_images_image_v2"
-sidebar_current: "docs-openstack-datasource-images-image-v2"
-description: |-
- Get information on an OpenStack Image.
----
-
-# openstack\_images\_image\_v2
-
-Use this data source to get the ID of an available OpenStack image.
-
-## Example Usage
-
-```hcl
-data "openstack_images_image_v2" "ubuntu" {
- name = "Ubuntu 16.04"
- most_recent = true
-}
-```
-
-## Argument Reference
-
-* `region` - (Required) The region in which to obtain the V2 Glance client.
- A Glance client is needed to create an Image that can be used with
- a compute instance. If omitted, the `OS_REGION_NAME` environment variable
- is used.
-
-* `most_recent` - (Optional) If more than one result is returned, use the most
- recent image.
-
-* `name` - (Optional) The name of the image.
-
-* `owner` - (Optional) The owner (UUID) of the image.
-
-* `size_min` - (Optional) The minimum size (in bytes) of the image to return.
-
-* `size_max` - (Optional) The maximum size (in bytes) of the image to return.
-
-* `sort_direction` - (Optional) Order the results in either `asc` or `desc`.
-
-* `sort_key` - (Optional) Sort images based on a certain key. Defaults to `name`.
-
-* `tag` - (Optional) Search for images with a specific tag.
-
-* `visibility` - (Optional) The visibility of the image. Must be one of
- "public", "private", "community", or "shared". Defaults to "private".
-
-
-## Attributes Reference
-
-`id` is set to the ID of the found image. In addition, the following attributes
-are exported:
-
-* `checksum` - The checksum of the data associated with the image.
-* `created_at` - The date the image was created.
-* `container_format`: The format of the image's container.
-* `disk_format`: The format of the image's disk.
-* `file` - the trailing path after the glance endpoint that represent the
-location of the image or the path to retrieve it.
-* `metadata` - The metadata associated with the image.
- Image metadata allows you to meaningfully define the image properties
- and tags. See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
-* `min_disk_gb`: The minimum amount of disk space required to use the image.
-* `min_ram_mb`: The minimum amount of ram required to use the image.
-* `protected` - Whether or not the image is protected.
-* `schema` - The path to the JSON-schema that represent
- the image or image
-* `size_bytes` - The size of the image (in bytes).
-* `tags` - See Argument Reference above.
-* `update_at` - The date the image was last updated.
diff --git a/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown
deleted file mode 100644
index c297769c2..000000000
--- a/website/source/docs/providers/openstack/d/networking_network_v2.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_network_v2"
-sidebar_current: "docs-openstack-datasource-networking-network-v2"
-description: |-
- Get information on an OpenStack Network.
----
-
-# openstack\_networking\_network\_v2
-
-Use this data source to get the ID of an available OpenStack network.
-
-## Example Usage
-
-```hcl
-data "openstack_networking_network_v2" "network" {
- name = "tf_test_network"
-}
-```
-
-## Argument Reference
-
-* `region` - (Required) The region in which to obtain the V2 Neutron client.
- A Neutron client is needed to retrieve network IDs. If omitted, the
- `OS_REGION_NAME` environment variable is used.
-
-* `network_id` - (Optional) The ID of the network.
-
-* `name` - (Optional) The name of the network.
-
-* `matching_subnet_cidr` - (Optional) The CIDR of a subnet within the network.
-
-* `tenant_id` - (Optional) The owner of the network.
-
-## Attributes Reference
-
-`id` is set to the ID of the found network. In addition, the following attributes
-are exported:
-
-* `admin_state_up` - (Optional) The administrative state of the network.
-* `name` - See Argument Reference above.
-* `region` - See Argument Reference above.
-* `shared` - (Optional) Specifies whether the network resource can be accessed
- by any tenant or not.
diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown
deleted file mode 100644
index 378af04c1..000000000
--- a/website/source/docs/providers/openstack/index.html.markdown
+++ /dev/null
@@ -1,187 +0,0 @@
----
-layout: "openstack"
-page_title: "Provider: OpenStack"
-sidebar_current: "docs-openstack-index"
-description: |-
- The OpenStack provider is used to interact with the many resources supported by OpenStack. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# OpenStack Provider
-
-The OpenStack provider is used to interact with the
-many resources supported by OpenStack. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the OpenStack Provider
-provider "openstack" {
- user_name = "admin"
- tenant_name = "admin"
- password = "pwd"
- auth_url = "http://myauthurl:5000/v2.0"
-}
-
-# Create a web server
-resource "openstack_compute_instance_v2" "test-server" {
- # ...
-}
-```
-
-## Configuration Reference
-
-The following arguments are supported:
-
-* `auth_url` - (Required) The Identity authentication URL. If omitted, the
- `OS_AUTH_URL` environment variable is used.
-
-* `user_name` - (Optional) The Username to login with. If omitted, the
- `OS_USERNAME` environment variable is used.
-
-* `user_id` - (Optional) The User ID to login with. If omitted, the
- `OS_USER_ID` environment variable is used.
-
-* `tenant_id` - (Optional) The ID of the Tenant (Identity v2) or Project
- (Identity v3) to login with. If omitted, the `OS_TENANT_ID` or
- `OS_PROJECT_ID` environment variables are used.
-
-* `tenant_name` - (Optional) The Name of the Tenant (Identity v2) or Project
- (Identity v3) to login with. If omitted, the `OS_TENANT_NAME` or
- `OS_PROJECT_NAME` environment variables are used.
-
-* `password` - (Optional) The Password to login with. If omitted, the
- `OS_PASSWORD` environment variable is used.
-
-* `token` - (Optional; Required if not using `user_name` and `password`)
- A token is an expiring, temporary means of access issued via the Keystone
- service. By specifying a token, you do not have to specify a username/password
- combination, since the token was already created by a username/password out of
- band of Terraform. If omitted, the `OS_AUTH_TOKEN` environment variable is used.
-
-* `domain_id` - (Optional) The ID of the Domain to scope to (Identity v3).
- If omitted, the following environment variables are checked (in this order):
- `OS_USER_DOMAIN_ID`, `OS_PROJECT_DOMAIN_ID`, `OS_DOMAIN_ID`.
-
-* `domain_name` - (Optional) The Name of the Domain to scope to (Identity v3).
- If omitted, the following environment variables are checked (in this order):
- `OS_USER_DOMAIN_NAME`, `OS_PROJECT_DOMAIN_NAME`, `OS_DOMAIN_NAME`,
- `DEFAULT_DOMAIN`.
-
-* `insecure` - (Optional) Trust self-signed SSL certificates. If omitted, the
- `OS_INSECURE` environment variable is used.
-
-* `cacert_file` - (Optional) Specify a custom CA certificate when communicating
- over SSL. You can specify either a path to the file or the contents of the
- certificate. If omitted, the `OS_CACERT` environment variable is used.
-
-* `cert` - (Optional) Specify client certificate file for SSL client
- authentication. You can specify either a path to the file or the contents of
- the certificate. If omitted the `OS_CERT` environment variable is used.
-
-* `key` - (Optional) Specify client private key file for SSL client
- authentication. You can specify either a path to the file or the contents of
- the key. If omitted the `OS_KEY` environment variable is used.
-
-* `endpoint_type` - (Optional) Specify which type of endpoint to use from the
- service catalog. It can be set using the `OS_ENDPOINT_TYPE` environment
- variable. If not set, the public endpoint is used.
-
-* `swauth` - (Optional) Set to `true` to authenticate against Swauth, a
- Swift-native authentication system. If omitted, the `OS_SWAUTH` environment
- variable is used. You must also set `username` to the Swauth/Swift username
- such as `username:project`. Set the `password` to the Swauth/Swift key.
- Finally, set `auth_url` as the location of the Swift service. Note that this
- will only work when used with the OpenStack Object Storage resources.
-
-## Additional Logging
-
-This provider has the ability to log all HTTP requests and responses between
-Terraform and the OpenStack cloud which is useful for troubleshooting and
-debugging.
-
-To enable these logs, set the `OS_DEBUG` environment variable to `1` along
-with the usual `TF_LOG=DEBUG` environment variable:
-
-```shell
-$ OS_DEBUG=1 TF_LOG=DEBUG terraform apply
-```
-
-If you submit these logs with a bug report, please ensure any sensitive
-information has been scrubbed first!
-
-## Rackspace Compatibility
-
-Using this OpenStack provider with Rackspace is not supported and not
-guaranteed to work; however, users have reported success with the
-following notes in mind:
-
-* Interacting with instances has been seen to work. Interacting with
-all other resources is either untested or known to not work.
-
-* Use your _password_ instead of your Rackspace API KEY.
-
-* Explicitly define the public and private networks in your
-instances as shown below:
-
-```
-resource "openstack_compute_instance_v2" "my_instance" {
- name = "my_instance"
- region = "DFW"
- image_id = "fabe045f-43f8-4991-9e6c-5cabd617538c"
- flavor_id = "general1-4"
- key_pair = "provisioning_key"
-
- network {
- uuid = "00000000-0000-0000-0000-000000000000"
- name = "public"
- }
-
- network {
- uuid = "11111111-1111-1111-1111-111111111111"
- name = "private"
- }
-}
-```
-
-If you try using this provider with Rackspace and run into bugs, you
-are welcome to open a bug report / issue on GitHub, but please keep
-in mind that this is unsupported and the reported bug may not be
-able to be fixed.
-
-If you have successfully used this provider with Rackspace and can
-add any additional comments, please let us know.
-
-## Testing and Development
-
-In order to run the Acceptance Tests for development, the following environment
-variables must also be set:
-
-* `OS_REGION_NAME` - The region in which to create the server instance.
-
-* `OS_IMAGE_ID` or `OS_IMAGE_NAME` - a UUID or name of an existing image in
- Glance.
-
-* `OS_FLAVOR_ID` or `OS_FLAVOR_NAME` - an ID or name of an existing flavor.
-
-* `OS_POOL_NAME` - The name of a Floating IP pool.
-
-* `OS_NETWORK_ID` - The UUID of a network in your test environment.
-
-* `OS_EXTGW_ID` - The UUID of the external gateway.
-
-You should be able to use any OpenStack environment to develop on as long as the
-above environment variables are set.
-
-Most of Terraform's OpenStack support is done in a standardized Packstack
-all-in-one environment. You can find the scripts to build this environment
-[here](https://github.com/jtopjian/terraform-devstack/tree/master/packstack-standard).
-The included `main.tf` file will need to be modified for your specific
-environment. Once it's up and running, you will have access to a standard,
-up-to-date OpenStack environment with the latest OpenStack services.
-
-If you require access to deprecated services, such as Keystone v2 and
-LBaaS v1, you can use the "legacy" environment
-[here](https://github.com/jtopjian/terraform-devstack/tree/master/packstack-legacy).
diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown
deleted file mode 100644
index f7d078b09..000000000
--- a/website/source/docs/providers/openstack/r/blockstorage_volume_attach_v2.html.markdown
+++ /dev/null
@@ -1,131 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_blockstorage_volume_attach_v2"
-sidebar_current: "docs-openstack-resource-blockstorage-volume-attach-v2"
-description: |-
- Creates an attachment connection to a Block Storage volume
----
-
-# openstack\_blockstorage\_volume\_attach\_v2
-
-This resource is experimental and may be removed in the future! Feedback
-is requested if you find this resource useful or if you find any problems
-with it.
-
-Creates a general purpose attachment connection to a Block
-Storage volume using the OpenStack Block Storage (Cinder) v2 API.
-Depending on your Block Storage service configuration, this
-resource can assist in attaching a volume to a non-OpenStack resource
-such as a bare-metal server or a remote virtual machine in a
-different cloud provider.
-
-This does not actually attach a volume to an instance. Please use
-the `openstack_compute_volume_attach_v2` resource for that.
-
-## Example Usage
-
-```hcl
-resource "openstack_blockstorage_volume_v2" "volume_1" {
- name = "volume_1"
- size = 1
-}
-
-resource "openstack_blockstorage_volume_attach_v2" "va_1" {
- volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}"
- device = "auto"
- host_name = "devstack"
- ip_address = "192.168.255.10"
- initiator = "iqn.1993-08.org.debian:01:e9861fb1859"
- os_type = "linux2"
- platform = "x86_64"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Block Storage
- client. A Block Storage client is needed to create a volume attachment.
- If omitted, the `OS_REGION_NAME` environment variable is used. Changing
- this creates a new volume attachment.
-
-* `attach_mode` - (Optional) Specify whether to attach the volume as Read-Only
- (`ro`) or Read-Write (`rw`). Only values of `ro` and `rw` are accepted.
- If left unspecified, the Block Storage API will apply a default of `rw`.
-
-* `device` - (Optional) The device to tell the Block Storage service this
- volume will be attached as. This is purely for informational purposes.
- You can specify `auto` or a device such as `/dev/vdc`.
-
-* `host_name` - (Required) The host to attach the volume to.
-
-* `initiator` - (Optional) The iSCSI initiator string to make the connection.
-
-* `ip_address` - (Optional) The IP address of the `host_name` above.
-
-* `multipath` - (Optional) Whether to connect to this volume via multipath.
-
-* `os_type` - (Optional) The iSCSI initiator OS type.
-
-* `platform` - (Optional) The iSCSI initiator platform.
-
-* `volume_id` - (Required) The ID of the Volume to attach to an Instance.
-
-* `wwpn` - (Optional) An array of wwpn strings. Used for Fibre Channel
- connections.
-
-* `wwnn` - (Optional) A wwnn name. Used for Fibre Channel connections.
-
-## Attributes Reference
-
-In addition to the above, the following attributes are exported:
-
-* `data` - This is a map of key/value pairs that contain the connection
- information. You will want to pass this information to a provisioner
- script to finalize the connection. See below for more information.
-
-* `driver_volume_type` - The storage driver that the volume is based on.
-
-* `mount_point_base` - A mount point base name for shared storage.
-
-## Volume Connection Data
-
-Upon creation of this resource, a `data` exported attribute will be available.
-This attribute is a set of key/value pairs that contains the information
-required to complete the block storage connection.
-
-As an example, creating an iSCSI-based volume will return the following:
-
-```
-data.access_mode = rw
-data.auth_method = CHAP
-data.auth_password = xUhbGKQ8QCwKmHQ2
-data.auth_username = Sphn5X4EoyFUUMYVYSA4
-data.target_iqn = iqn.2010-10.org.openstack:volume-2d87ed25-c312-4f42-be1d-3b36b014561d
-data.target_portal = 192.168.255.10:3260
-data.volume_id = 2d87ed25-c312-4f42-be1d-3b36b014561d
-```
-
-This information can then be fed into a provisioner or a template shell script,
-where the final result would look something like:
-
-```
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --interface default --op new
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --op update -n node.session.auth.authmethod -v ${self.data.auth_method}
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --op update -n node.session.auth.username -v ${self.data.auth_username}
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --op update -n node.session.auth.password -v ${self.data.auth_password}
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --login
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --op update -n node.startup -v automatic
-iscsiadm -m node -T ${self.data.target_iqn} -p ${self.data.target_portal} --rescan
-```
-
-The contents of `data` will vary from each Block Storage service. You must have
-a good understanding of how the service is configured and how to make the
-appropriate final connection. However, if used correctly, this has the
-flexibility to be able to attach OpenStack Block Storage volumes to
-non-OpenStack resources.
-
-## Import
-
-It is not possible to import this resource.
diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown
deleted file mode 100644
index c0282bf7b..000000000
--- a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown
+++ /dev/null
@@ -1,83 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_blockstorage_volume_v1"
-sidebar_current: "docs-openstack-resource-blockstorage-volume-v1"
-description: |-
- Manages a V1 volume resource within OpenStack.
----
-
-# openstack\_blockstorage\_volume_v1
-
-Manages a V1 volume resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_blockstorage_volume_v1" "volume_1" {
- region = "RegionOne"
- name = "tf-test-volume"
- description = "first test volume"
- size = 3
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to create the volume. If
- omitted, the `OS_REGION_NAME` environment variable is used. Changing this
- creates a new volume.
-
-* `size` - (Required) The size of the volume to create (in gigabytes). Changing
- this creates a new volume.
-
-* `name` - (Optional) A unique name for the volume. Changing this updates the
- volume's name.
-
-* `description` - (Optional) A description of the volume. Changing this updates
- the volume's description.
-
-* `availability_zone` - (Optional) The availability zone for the volume.
- Changing this creates a new volume.
-
-* `image_id` - (Optional) The image ID from which to create the volume.
- Changing this creates a new volume.
-
-* `snapshot_id` - (Optional) The snapshot ID from which to create the volume.
- Changing this creates a new volume.
-
-* `source_vol_id` - (Optional) The volume ID from which to create the volume.
- Changing this creates a new volume.
-
-* `metadata` - (Optional) Metadata key/value pairs to associate with the volume.
- Changing this updates the existing volume metadata.
-
-* `volume_type` - (Optional) The type of volume to create.
- Changing this creates a new volume.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `size` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `availability_zone` - See Argument Reference above.
-* `image_id` - See Argument Reference above.
-* `source_vol_id` - See Argument Reference above.
-* `snapshot_id` - See Argument Reference above.
-* `metadata` - See Argument Reference above.
-* `volume_type` - See Argument Reference above.
-* `attachment` - If a volume is attached to an instance, this attribute will
- display the Attachment ID, Instance ID, and the Device as the Instance
- sees it.
-
-## Import
-
-Volumes can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_blockstorage_volume_v1.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d
-```
diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown
deleted file mode 100644
index 951084d4d..000000000
--- a/website/source/docs/providers/openstack/r/blockstorage_volume_v2.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_blockstorage_volume_v2"
-sidebar_current: "docs-openstack-resource-blockstorage-volume-v2"
-description: |-
- Manages a V2 volume resource within OpenStack.
----
-
-# openstack\_blockstorage\_volume_v2
-
-Manages a V2 volume resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_blockstorage_volume_v2" "volume_1" {
- region = "RegionOne"
- name = "volume_1"
- description = "first test volume"
- size = 3
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to create the volume. If
- omitted, the `OS_REGION_NAME` environment variable is used. Changing this
- creates a new volume.
-
-* `size` - (Required) The size of the volume to create (in gigabytes). Changing
- this creates a new volume.
-
-* `availability_zone` - (Optional) The availability zone for the volume.
- Changing this creates a new volume.
-
-* `consistency_group_id` - (Optional) The consistency group to place the volume
- in.
-
-* `description` - (Optional) A description of the volume. Changing this updates
- the volume's description.
-
-* `image_id` - (Optional) The image ID from which to create the volume.
- Changing this creates a new volume.
-
-* `metadata` - (Optional) Metadata key/value pairs to associate with the volume.
- Changing this updates the existing volume metadata.
-
-* `name` - (Optional) A unique name for the volume. Changing this updates the
- volume's name.
-
-* `snapshot_id` - (Optional) The snapshot ID from which to create the volume.
- Changing this creates a new volume.
-
-* `source_replica` - (Optional) The volume ID to replicate with.
-
-* `source_vol_id` - (Optional) The volume ID from which to create the volume.
- Changing this creates a new volume.
-
-* `volume_type` - (Optional) The type of volume to create.
- Changing this creates a new volume.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `size` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `availability_zone` - See Argument Reference above.
-* `image_id` - See Argument Reference above.
-* `source_vol_id` - See Argument Reference above.
-* `snapshot_id` - See Argument Reference above.
-* `metadata` - See Argument Reference above.
-* `volume_type` - See Argument Reference above.
-* `attachment` - If a volume is attached to an instance, this attribute will
- display the Attachment ID, Instance ID, and the Device as the Instance
- sees it.
-
-## Import
-
-Volumes can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_blockstorage_volume_v2.volume_1 ea257959-eeb1-4c10-8d33-26f0409a755d
-```
diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown
deleted file mode 100644
index 698869f6e..000000000
--- a/website/source/docs/providers/openstack/r/compute_floatingip_associate_v2.html.markdown
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_floatingip_associate_v2"
-sidebar_current: "docs-openstack-resource-compute-floatingip-associate-v2"
-description: |-
- Associate a floating IP to an instance
----
-
-# openstack\_compute\_floatingip_associate_v2
-
-Associate a floating IP to an instance. This can be used instead of the
-`floating_ip` options in `openstack_compute_instance_v2`.
-
-## Example Usage
-
-### Automatically detect the correct network
-
-```hcl
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = 3
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-}
-
-resource "openstack_networking_floatingip_v2" "fip_1" {
- pool = "my_pool"
-}
-
-resource "openstack_compute_floatingip_associate_v2" "fip_1" {
- floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}"
- instance_id = "${openstack_compute_instance_v2.instance_1.id}"
-}
-```
-
-### Explicitly set the network to attach to
-
-```hcl
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = 3
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- network {
- name = "my_network"
- }
-
- network {
- name = "default"
- }
-}
-
-resource "openstack_networking_floatingip_v2" "fip_1" {
- pool = "my_pool"
-}
-
-resource "openstack_compute_floatingip_associate_v2" "fip_1" {
- floating_ip = "${openstack_networking_floatingip_v2.fip_1.address}"
- instance_id = "${openstack_compute_instance_v2.instance_1.id}"
- fixed_ip = "${openstack_compute_instance_v2.instance_1.network.1.fixed_ip_v4}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- A Compute client is needed to associate a floating IP with an instance.
- If omitted, the `OS_REGION_NAME` environment variable is used.
- Changing this creates a new floatingip_associate.
-
-* `floating_ip` - (Required) The floating IP to associate.
-
-* `instance_id` - (Required) The instance to associate the floating IP with.
-
-* `fixed_ip` - (Optional) The specific IP address to direct traffic to.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `floating_ip` - See Argument Reference above.
-* `instance_id` - See Argument Reference above.
-* `fixed_ip` - See Argument Reference above.
-
-## Import
-
-This resource can be imported by specifying all three arguments, separated
-by a forward slash:
-
-```
-$ terraform import openstack_compute_floatingip_associate_v2.fip_1 <floating_ip>/<instance_id>/<fixed_ip>
-```
diff --git a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown
deleted file mode 100644
index c00945e13..000000000
--- a/website/source/docs/providers/openstack/r/compute_floatingip_v2.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_floatingip_v2"
-sidebar_current: "docs-openstack-resource-compute-floatingip-v2"
-description: |-
- Manages a V2 floating IP resource within OpenStack Nova (compute).
----
-
-# openstack\_compute\_floatingip_v2
-
-Manages a V2 floating IP resource within OpenStack Nova (compute)
-that can be used for compute instances.
-These are similar to Neutron (networking) floating IP resources,
-but only networking floating IPs can be used with load balancers.
-
-## Example Usage
-
-```hcl
-resource "openstack_compute_floatingip_v2" "floatip_1" {
- pool = "public"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- A Compute client is needed to create a floating IP that can be used with
- a compute instance. If omitted, the `OS_REGION_NAME` environment variable
- is used. Changing this creates a new floating IP (which may or may not
- have a different address).
-
-* `pool` - (Required) The name of the pool from which to obtain the floating
- IP. Changing this creates a new floating IP.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `pool` - See Argument Reference above.
-* `address` - The actual floating IP address itself.
-* `fixed_ip` - The fixed IP address corresponding to the floating IP.
-* `instance_id` - UUID of the compute instance associated with the floating IP.
-
-## Import
-
-Floating IPs can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_compute_floatingip_v2.floatip_1 89c60255-9bd6-460c-822a-e2b959ede9d2
-```
diff --git a/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown
deleted file mode 100644
index 7eb38e85a..000000000
--- a/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown
+++ /dev/null
@@ -1,587 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_instance_v2"
-sidebar_current: "docs-openstack-resource-compute-instance-v2"
-description: |-
- Manages a V2 VM instance resource within OpenStack.
----
-
-# openstack\_compute\_instance_v2
-
-Manages a V2 VM instance resource within OpenStack.
-
-## Example Usage
-
-### Basic Instance
-
-```hcl
-resource "openstack_compute_instance_v2" "basic" {
- name = "basic"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- metadata {
- this = "that"
- }
-
- network {
- name = "my_network"
- }
-}
-```
-
-### Instance With Attached Volume
-
-```hcl
-resource "openstack_blockstorage_volume_v2" "myvol" {
- name = "myvol"
- size = 1
-}
-
-resource "openstack_compute_instance_v2" "myinstance" {
- name = "myinstance"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- network {
- name = "my_network"
- }
-}
-
-resource "openstack_compute_volume_attach_v2" "attached" {
- compute_id = "${openstack_compute_instance_v2.myinstance.id}"
- volume_id = "${openstack_blockstorage_volume_v2.myvol.id}"
-}
-```
-
-### Boot From Volume
-
-```hcl
-resource "openstack_compute_instance_v2" "boot-from-volume" {
- name = "boot-from-volume"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- block_device {
- uuid = ""
- source_type = "image"
- volume_size = 5
- boot_index = 0
- destination_type = "volume"
- delete_on_termination = true
- }
-
- network {
- name = "my_network"
- }
-}
-```
-
-### Boot From an Existing Volume
-
-```hcl
-resource "openstack_blockstorage_volume_v1" "myvol" {
- name = "myvol"
- size = 5
- image_id = ""
-}
-
-resource "openstack_compute_instance_v2" "boot-from-volume" {
- name = "bootfromvolume"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- block_device {
- uuid = "${openstack_blockstorage_volume_v1.myvol.id}"
- source_type = "volume"
- boot_index = 0
- destination_type = "volume"
- delete_on_termination = true
- }
-
- network {
- name = "my_network"
- }
-}
-```
-
-### Boot Instance, Create Volume, and Attach Volume as a Block Device
-
-```hcl
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- image_id = ""
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- block_device {
- uuid = ""
- source_type = "image"
- destination_type = "local"
- boot_index = 0
- delete_on_termination = true
- }
-
- block_device {
- source_type = "blank"
- destination_type = "volume"
- volume_size = 1
- boot_index = 1
- delete_on_termination = true
- }
-}
-```
-
-### Boot Instance and Attach Existing Volume as a Block Device
-
-```hcl
-resource "openstack_blockstorage_volume_v2" "volume_1" {
- name = "volume_1"
- size = 1
-}
-
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- image_id = ""
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- block_device {
- uuid = ""
- source_type = "image"
- destination_type = "local"
- boot_index = 0
- delete_on_termination = true
- }
-
- block_device {
- uuid = "${openstack_blockstorage_volume_v2.volume_1.id}"
- source_type = "volume"
- destination_type = "volume"
- boot_index = 1
- delete_on_termination = true
- }
-}
-```
-
-### Instance With Multiple Networks
-
-```hcl
-resource "openstack_networking_floatingip_v2" "myip" {
- pool = "my_pool"
-}
-
-resource "openstack_compute_instance_v2" "multi-net" {
- name = "multi-net"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- network {
- name = "my_first_network"
- }
-
- network {
- name = "my_second_network"
- }
-}
-
-resource "openstack_compute_floatingip_associate_v2" "myip" {
- floating_ip = "${openstack_networking_floatingip_v2.myip.address}"
- instance_id = "${openstack_compute_instance_v2.multi-net.id}"
- fixed_ip = "${openstack_compute_instance_v2.multi-net.network.1.fixed_ip_v4}"
-}
-```
-
-### Instance With Personality
-
-```hcl
-resource "openstack_compute_instance_v2" "personality" {
- name = "personality"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- personality {
- file = "/path/to/file/on/instance.txt"
- content = "contents of file"
- }
-
- network {
- name = "my_network"
- }
-}
-```
-
-### Instance with Multiple Ephemeral Disks
-
-```hcl
-resource "openstack_compute_instance_v2" "multi-eph" {
- name = "multi_eph"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
-
- block_device {
- boot_index = 0
- delete_on_termination = true
- destination_type = "local"
- source_type = "image"
- uuid = ""
- }
-
- block_device {
- boot_index = -1
- delete_on_termination = true
- destination_type = "local"
- source_type = "blank"
- volume_size = 1
- }
-
- block_device {
- boot_index = -1
- delete_on_termination = true
- destination_type = "local"
- source_type = "blank"
- volume_size = 1
- }
-}
-```
-
-### Instance with User Data (cloud-init)
-
-```hcl
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "basic"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["default"]
- user_data = "#cloud-config\nhostname: instance_1.example.com\nfqdn: instance_1.example.com"
-
- network {
- name = "my_network"
- }
-}
-```
-
-`user_data` can come from a variety of sources: inline, read in from the `file`
-function, or the `template_cloudinit_config` resource.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to create the server instance. If
- omitted, the `OS_REGION_NAME` environment variable is used. Changing this
- creates a new server.
-
-* `name` - (Required) A unique name for the resource.
-
-* `image_id` - (Optional; Required if `image_name` is empty and not booting
- from a volume. Do not specify if booting from a volume.) The image ID of
- the desired image for the server. Changing this creates a new server.
-
-* `image_name` - (Optional; Required if `image_id` is empty and not booting
- from a volume. Do not specify if booting from a volume.) The name of the
- desired image for the server. Changing this creates a new server.
-
-* `flavor_id` - (Optional; Required if `flavor_name` is empty) The flavor ID of
- the desired flavor for the server. Changing this resizes the existing server.
-
-* `flavor_name` - (Optional; Required if `flavor_id` is empty) The name of the
- desired flavor for the server. Changing this resizes the existing server.
-
-* `floating_ip` - (Deprecated) A *Compute* Floating IP that will be associated
- with the Instance. The Floating IP must be provisioned already. See *Notes*
- for more information about Floating IPs.
-
-* `user_data` - (Optional) The user data to provide when launching the instance.
- Changing this creates a new server.
-
-* `security_groups` - (Optional) An array of one or more security group names
- to associate with the server. Changing this results in adding/removing
- security groups from the existing server. *Note*: When attaching the
- instance to networks using Ports, place the security groups on the Port
- and not the instance.
-
-* `availability_zone` - (Optional) The availability zone in which to create
- the server. Changing this creates a new server.
-
-* `network` - (Optional) An array of one or more networks to attach to the
- instance. The network object structure is documented below. Changing this
- creates a new server.
-
-* `metadata` - (Optional) Metadata key/value pairs to make available from
- within the instance. Changing this updates the existing server metadata.
-
-* `config_drive` - (Optional) Whether to use the config_drive feature to
- configure the instance. Changing this creates a new server.
-
-* `admin_pass` - (Optional) The administrative password to assign to the server.
- Changing this changes the root password on the existing server.
-
-* `key_pair` - (Optional) The name of a key pair to put on the server. The key
- pair must already be created and associated with the tenant's account.
- Changing this creates a new server.
-
-* `block_device` - (Optional) Configuration of block devices. The block_device
- structure is documented below. Changing this creates a new server.
- You can specify multiple block devices which will create an instance with
- multiple disks. This configuration is very flexible, so please see the
- following [reference](http://docs.openstack.org/developer/nova/block_device_mapping.html)
- for more information.
-
-* `volume` - (Deprecated) Attach an existing volume to the instance. The volume
- structure is described below. *Note*: This is no longer the recommended
- method of attaching a volume to an instance. Please see `block_device`
- (above) or the `openstack_compute_volume_attach_v2` and
- `openstack_blockstorage_volume_attach_v2` resources.
-
-* `scheduler_hints` - (Optional) Provide the Nova scheduler with hints on how
- the instance should be launched. The available hints are described below.
-
-* `personality` - (Optional) Customize the personality of an instance by
- defining one or more files and their contents. The personality structure
- is described below.
-
-* `stop_before_destroy` - (Optional) Whether to try stop instance gracefully
- before destroying it, thus giving chance for guest OS daemons to stop correctly.
- If instance doesn't stop within timeout, it will be destroyed anyway.
-
-* `force_delete` - (Optional) Whether to force the OpenStack instance to be
- forcefully deleted. This is useful for environments that have reclaim / soft
- deletion enabled.
-
-
-The `network` block supports:
-
-* `uuid` - (Required unless `port` or `name` is provided) The network UUID to
- attach to the server. Changing this creates a new server.
-
-* `name` - (Required unless `uuid` or `port` is provided) The human-readable
- name of the network. Changing this creates a new server.
-
-* `port` - (Required unless `uuid` or `name` is provided) The port UUID of a
- network to attach to the server. Changing this creates a new server.
-
-* `fixed_ip_v4` - (Optional) Specifies a fixed IPv4 address to be used on this
- network. Changing this creates a new server.
-
-* `fixed_ip_v6` - (Optional) Specifies a fixed IPv6 address to be used on this
- network. Changing this creates a new server.
-
-* `floating_ip` - (Deprecated) Specifies a floating IP address to be associated
- with this network. Cannot be combined with a top-level floating IP. See
- *Notes* for more information about Floating IPs.
-
-* `access_network` - (Optional) Specifies if this network should be used for
- provisioning access. Accepts true or false. Defaults to false.
-
-The `block_device` block supports:
-
-* `uuid` - (Required unless `source_type` is set to `"blank"` ) The UUID of
- the image, volume, or snapshot. Changing this creates a new server.
-
-* `source_type` - (Required) The source type of the device. Must be one of
- "blank", "image", "volume", or "snapshot". Changing this creates a new
- server.
-
-* `volume_size` - The size of the volume to create (in gigabytes). Required
- in the following combinations: source=image and destination=volume,
- source=blank and destination=local, and source=blank and destination=volume.
- Changing this creates a new server.
-
-* `boot_index` - (Optional) The boot index of the volume. It defaults to 0.
- Changing this creates a new server.
-
-* `destination_type` - (Optional) The type that gets created. Possible values
- are "volume" and "local". Changing this creates a new server.
-
-* `delete_on_termination` - (Optional) Delete the volume / block device upon
- termination of the instance. Defaults to false. Changing this creates a
- new server.
-
-The `volume` block supports:
-
-* `volume_id` - (Required) The UUID of the volume to attach.
-
-* `device` - (Optional) The device that the volume will be attached as. For
- example: `/dev/vdc`. Omit this option to allow the volume to be
- auto-assigned a device.
-
-The `scheduler_hints` block supports:
-
-* `group` - (Optional) A UUID of a Server Group. The instance will be placed
- into that group.
-
-* `different_host` - (Optional) A list of instance UUIDs. The instance will
- be scheduled on a different host than all other instances.
-
-* `same_host` - (Optional) A list of instance UUIDs. The instance will be
- scheduled on the same host of those specified.
-
-* `query` - (Optional) A conditional query that a compute node must pass in
- order to host an instance.
-
-* `target_cell` - (Optional) The name of a cell to host the instance.
-
-* `build_near_host_ip` - (Optional) An IP Address in CIDR form. The instance
- will be placed on a compute node that is in the same subnet.
-
-The `personality` block supports:
-
-* `file` - (Required) The absolute path of the destination file.
-
-* `contents` - (Required) The contents of the file. Limited to 255 bytes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `access_ip_v4` - The first detected Fixed IPv4 address _or_ the
- Floating IP.
-* `access_ip_v6` - The first detected Fixed IPv6 address.
-* `metadata` - See Argument Reference above.
-* `security_groups` - See Argument Reference above.
-* `flavor_id` - See Argument Reference above.
-* `flavor_name` - See Argument Reference above.
-* `network/uuid` - See Argument Reference above.
-* `network/name` - See Argument Reference above.
-* `network/port` - See Argument Reference above.
-* `network/fixed_ip_v4` - The Fixed IPv4 address of the Instance on that
- network.
-* `network/fixed_ip_v6` - The Fixed IPv6 address of the Instance on that
- network.
-* `network/floating_ip` - The Floating IP address of the Instance on that
- network.
-* `network/mac` - The MAC address of the NIC on that network.
-* `all_metadata` - Contains all instance metadata, even metadata not set
- by Terraform.
-
-## Notes
-
-### Floating IPs
-
-Specifying Floating IPs within the instance is now deprecated. Please use
-either the `openstack_compute_floatingip_associate_v2` resource or attach
-the floating IP to an `openstack_networking_port_v2` resource.
-
-Floating IPs can be associated in one of two ways:
-
-* You can specify a Floating IP address by using the top-level `floating_ip`
-attribute. This floating IP will be associated with either the network defined
-in the first `network` block or the default network if no `network` blocks are
-defined.
-
-* You can specify a Floating IP address by using the `floating_ip` attribute
-defined in the `network` block. Each `network` block can have its own floating
-IP address.
-
-Only one of the above methods can be used.
-
-### Multiple Ephemeral Disks
-
-It's possible to specify multiple `block_device` entries to create an instance
-with multiple ephemeral (local) disks. In order to create multiple ephemeral
-disks, the sum of the total amount of ephemeral space must be less than or
-equal to what the chosen flavor supports.
-
-The following example shows how to create an instance with multiple ephemeral
-disks:
-
-```
-resource "openstack_compute_instance_v2" "foo" {
- name = "terraform-test"
- security_groups = ["default"]
-
- block_device {
- boot_index = 0
- delete_on_termination = true
- destination_type = "local"
- source_type = "image"
- uuid = ""
- }
-
- block_device {
- boot_index = -1
- delete_on_termination = true
- destination_type = "local"
- source_type = "blank"
- volume_size = 1
- }
-
- block_device {
- boot_index = -1
- delete_on_termination = true
- destination_type = "local"
- source_type = "blank"
- volume_size = 1
- }
-}
-```
-
-### Instances and Ports
-
-Neutron Ports are a great feature and provide a lot of functionality. However,
-there are some notes to be aware of when mixing Instances and Ports:
-
-* When attaching an Instance to one or more networks using Ports, place the
-security groups on the Port and not the Instance. If you place the security
-groups on the Instance, the security groups will not be applied upon creation,
-but they will be applied upon a refresh. This is a known OpenStack bug.
-
-* Network IP information is not available within an instance for networks that
-are attached with Ports. This is mostly due to the flexibility Neutron Ports
-provide when it comes to IP addresses. For example, a Neutron Port can have
-multiple Fixed IP addresses associated with it. It's not possible to know which
-single IP address the user would want returned to the Instance's state
-information. Therefore, in order for a Provisioner to connect to an Instance
-via its network Port, customize the `connection` information:
-
-```hcl
-resource "openstack_networking_port_v2" "port_1" {
- name = "port_1"
- admin_state_up = "true"
-
- network_id = "0a1d0a27-cffa-4de3-92c5-9d3fd3f2e74d"
-
- security_group_ids = [
- "2f02d20a-8dca-49b7-b26f-b6ce9fddaf4f",
- "ca1e5ed7-dae8-4605-987b-fadaeeb30461",
- ]
-}
-
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
-
- network {
- port = "${openstack_networking_port_v2.port_1.id}"
- }
-
- connection {
- user = "root"
- host = "${openstack_networking_port_v2.port_1.fixed_ip.0.ip_address}"
- private_key = "~/path/to/key"
- }
-
- provisioner "remote-exec" {
- inline = [
- "echo terraform executed > /tmp/foo",
- ]
- }
-}
-```
diff --git a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown
deleted file mode 100644
index 9f788060b..000000000
--- a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_keypair_v2"
-sidebar_current: "docs-openstack-resource-compute-keypair-v2"
-description: |-
- Manages a V2 keypair resource within OpenStack.
----
-
-# openstack\_compute\_keypair_v2
-
-Manages a V2 keypair resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_compute_keypair_v2" "test-keypair" {
- name = "my-keypair"
- public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLotBCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAnOfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZqd9LvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TaIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIF61p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- Keypairs are associated with accounts, but a Compute client is needed to
- create one. If omitted, the `OS_REGION_NAME` environment variable is used.
- Changing this creates a new keypair.
-
-* `name` - (Required) A unique name for the keypair. Changing this creates a new
- keypair.
-
-* `public_key` - (Required) A pregenerated OpenSSH-formatted public key.
- Changing this creates a new keypair.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `public_key` - See Argument Reference above.
-
-## Import
-
-Keypairs can be imported using the `name`, e.g.
-
-```
-$ terraform import openstack_compute_keypair_v2.my-keypair test-keypair
-```
diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
deleted file mode 100644
index 7385524bb..000000000
--- a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown
+++ /dev/null
@@ -1,126 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_secgroup_v2"
-sidebar_current: "docs-openstack-resource-compute-secgroup-v2"
-description: |-
- Manages a V2 security group resource within OpenStack.
----
-
-# openstack\_compute\_secgroup_v2
-
-Manages a V2 security group resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_compute_secgroup_v2" "secgroup_1" {
- name = "my_secgroup"
- description = "my security group"
-
- rule {
- from_port = 22
- to_port = 22
- ip_protocol = "tcp"
- cidr = "0.0.0.0/0"
- }
-
- rule {
- from_port = 80
- to_port = 80
- ip_protocol = "tcp"
- cidr = "0.0.0.0/0"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- A Compute client is needed to create a security group. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- security group.
-
-* `name` - (Required) A unique name for the security group. Changing this
- updates the `name` of an existing security group.
-
-* `description` - (Required) A description for the security group. Changing this
- updates the `description` of an existing security group.
-
-* `rule` - (Optional) A rule describing how the security group operates. The
- rule object structure is documented below. Changing this updates the
- security group rules. As shown in the example above, multiple rule blocks
- may be used.
-
-The `rule` block supports:
-
-* `from_port` - (Required) An integer representing the lower bound of the port
-range to open. Changing this creates a new security group rule.
-
-* `to_port` - (Required) An integer representing the upper bound of the port
-range to open. Changing this creates a new security group rule.
-
-* `ip_protocol` - (Required) The protocol type that will be allowed. Changing
-this creates a new security group rule.
-
-* `cidr` - (Optional) Required if `from_group_id` or `self` is empty. The IP range
-that will be the source of network traffic to the security group. Use 0.0.0.0/0
-to allow all IP addresses. Changing this creates a new security group rule. Cannot
-be combined with `from_group_id` or `self`.
-
-* `from_group_id` - (Optional) Required if `cidr` or `self` is empty. The ID of a
-group from which to forward traffic to the parent group. Changing this creates a
-new security group rule. Cannot be combined with `cidr` or `self`.
-
-* `self` - (Optional) Required if `cidr` and `from_group_id` are empty. If true,
-the security group itself will be added as a source to this ingress rule. Cannot
-be combined with `cidr` or `from_group_id`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `rule` - See Argument Reference above.
-
-## Notes
-
-### ICMP Rules
-
-When using ICMP as the `ip_protocol`, the `from_port` sets the ICMP _type_ and the `to_port` sets the ICMP _code_. To allow all ICMP types, set each value to `-1`, like so:
-
-```hcl
-rule {
- from_port = -1
- to_port = -1
- ip_protocol = "icmp"
- cidr = "0.0.0.0/0"
-}
-```
-
-A list of ICMP types and codes can be found [here](https://en.wikipedia.org/wiki/Internet_Control_Message_Protocol#Control_messages).
-
-### Referencing Security Groups
-
-When referencing a security group in a configuration (for example, a configuration creates a new security group and then needs to apply it to an instance being created in the same configuration), it is currently recommended to reference the security group by name and not by ID, like this:
-
-```hcl
-resource "openstack_compute_instance_v2" "test-server" {
- name = "tf-test"
- image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743"
- flavor_id = "3"
- key_pair = "my_key_pair_name"
- security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.name}"]
-}
-```
-
-## Import
-
-Security Groups can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_compute_secgroup_v2.my_secgroup 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf
-```
diff --git a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown
deleted file mode 100644
index 75f8f426d..000000000
--- a/website/source/docs/providers/openstack/r/compute_servergroup_v2.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_servergroup_v2"
-sidebar_current: "docs-openstack-resource-compute-servergroup-v2"
-description: |-
- Manages a V2 Server Group resource within OpenStack.
----
-
-# openstack\_compute\_servergroup_v2
-
-Manages a V2 Server Group resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_compute_servergroup_v2" "test-sg" {
- name = "my-sg"
- policies = ["anti-affinity"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- If omitted, the `OS_REGION_NAME` environment variable is used. Changing
- this creates a new server group.
-
-* `name` - (Required) A unique name for the server group. Changing this creates
- a new server group.
-
-* `policies` - (Required) The set of policies for the server group. Only two
-  policies are available right now, and both are mutually exclusive. See
-  the Policies section for more information. Changing this creates a new
-  server group.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Policies
-
-* `affinity` - All instances/servers launched in this group will be hosted on
- the same compute node.
-
-* `anti-affinity` - All instances/servers launched in this group will be
- hosted on different compute nodes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `policies` - See Argument Reference above.
-* `members` - The instances that are part of this server group.
-
-## Import
-
-Server Groups can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_compute_servergroup_v2.test-sg 1bc30ee9-9d5b-4c30-bdd5-7f1e663f5edf
-```
diff --git a/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown
deleted file mode 100644
index 1b9b2fdf1..000000000
--- a/website/source/docs/providers/openstack/r/compute_volume_attach_v2.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_compute_volume_attach_v2"
-sidebar_current: "docs-openstack-resource-compute-volume-attach-v2"
-description: |-
- Attaches a Block Storage Volume to an Instance.
----
-
-# openstack\_compute\_volume_attach_v2
-
-Attaches a Block Storage Volume to an Instance using the OpenStack
-Compute (Nova) v2 API.
-
-## Example Usage
-
-```hcl
-resource "openstack_blockstorage_volume_v2" "volume_1" {
- name = "volume_1"
- size = 1
-}
-
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- security_groups = ["default"]
-}
-
-resource "openstack_compute_volume_attach_v2" "va_1" {
- instance_id = "${openstack_compute_instance_v2.instance_1.id}"
- volume_id = "${openstack_blockstorage_volume_v2.volume_1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Compute client.
- A Compute client is needed to create a volume attachment. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a
- new volume attachment.
-
-* `instance_id` - (Required) The ID of the Instance to attach the Volume to.
-
-* `volume_id` - (Required) The ID of the Volume to attach to an Instance.
-
-* `device` - (Optional) The device of the volume attachment (ex: `/dev/vdc`).
- _NOTE_: Being able to specify a device is dependent upon the hypervisor in
- use. There is a chance that the device specified in Terraform will not be
- the same device the hypervisor chose. If this happens, Terraform will wish
- to update the device upon subsequent applying which will cause the volume
- to be detached and reattached indefinitely. Please use with caution.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `instance_id` - See Argument Reference above.
-* `volume_id` - See Argument Reference above.
-* `device` - See Argument Reference above. _NOTE_: The correctness of this
- information is dependent upon the hypervisor in use. In some cases, this
- should not be used as an authoritative piece of information.
-
-## Import
-
-Volume Attachments can be imported using the Instance ID and Volume ID
-separated by a slash, e.g.
-
-```
-$ terraform import openstack_compute_volume_attach_v2.va_1 89c60255-9bd6-460c-822a-e2b959ede9d2/45670584-225f-46c3-b33e-6707b589b666
-```
diff --git a/website/source/docs/providers/openstack/r/dns_recordset_v2.html.markdown b/website/source/docs/providers/openstack/r/dns_recordset_v2.html.markdown
deleted file mode 100644
index c8312e7f9..000000000
--- a/website/source/docs/providers/openstack/r/dns_recordset_v2.html.markdown
+++ /dev/null
@@ -1,82 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_dns_recordset_v2"
-sidebar_current: "docs-openstack-resource-dns-recordset-v2"
-description: |-
- Manages a DNS record set in the OpenStack DNS Service
----
-
-# openstack\_dns\_recordset_v2
-
-Manages a DNS record set in the OpenStack DNS Service.
-
-## Example Usage
-
-### Automatically detect the correct network
-
-```hcl
-resource "openstack_dns_zone_v2" "example_zone" {
- name = "example.com."
- email = "email2@example.com"
- description = "a zone"
- ttl = 6000
- type = "PRIMARY"
-}
-
-resource "openstack_dns_recordset_v2" "rs_example_com" {
- zone_id = "${openstack_dns_zone_v2.example_zone.id}"
- name = "rs.example.com."
- description = "An example record set"
- ttl = 3000
- type = "A"
- records = ["10.0.0.1"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 DNS client.
- If omitted, the `OS_REGION_NAME` environment variable is used.
- Changing this creates a new DNS record set.
-
-* `zone_id` - (Required) The ID of the zone in which to create the record set.
- Changing this creates a new DNS record set.
-
-* `name` - (Required) The name of the record set. Note the `.` at the end of the name.
- Changing this creates a new DNS record set.
-
-* `type` - (Optional) The type of record set. Examples: "A", "MX".
- Changing this creates a new DNS record set.
-
-* `ttl` - (Optional) The time to live (TTL) of the record set.
-
-* `description` - (Optional) A description of the record set.
-
-* `records` - (Optional) An array of DNS records.
-
-* `value_specs` - (Optional) Map of additional options. Changing this creates a
- new record set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `type` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `records` - See Argument Reference above.
-* `zone_id` - See Argument Reference above.
-* `value_specs` - See Argument Reference above.
-
-## Import
-
-This resource can be imported by specifying the zone ID and recordset ID,
-separated by a forward slash.
-
-```
-$ terraform import openstack_dns_recordset_v2.recordset_1 <zone_id>/<recordset_id>
-```
diff --git a/website/source/docs/providers/openstack/r/dns_zone_v2.html.markdown b/website/source/docs/providers/openstack/r/dns_zone_v2.html.markdown
deleted file mode 100644
index 3b9269f36..000000000
--- a/website/source/docs/providers/openstack/r/dns_zone_v2.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_dns_zone_v2"
-sidebar_current: "docs-openstack-resource-dns-zone-v2"
-description: |-
- Manages a DNS zone in the OpenStack DNS Service
----
-
-# openstack\_dns\_zone_v2
-
-Manages a DNS zone in the OpenStack DNS Service.
-
-## Example Usage
-
-### Automatically detect the correct network
-
-```hcl
-resource "openstack_dns_zone_v2" "example.com" {
- name = "example.com."
- email = "jdoe@example.com"
- description = "An example zone"
- ttl = 3000
- type = "PRIMARY"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 DNS client.
-  If omitted, the `OS_REGION_NAME` environment variable is used.
-  Changing this creates a new DNS zone.
-
-* `name` - (Required) The name of the zone. Note the `.` at the end of the name.
- Changing this creates a new DNS zone.
-
-* `email` - (Optional) The email contact for the zone record.
-
-* `type` - (Optional) The type of zone. Can either be `PRIMARY` or `SECONDARY`.
- Changing this creates a new zone.
-
-* `attributes` - (Optional) Attributes for the DNS Service scheduler.
- Changing this creates a new zone.
-
-* `ttl` - (Optional) The time to live (TTL) of the zone.
-
-* `description` - (Optional) A description of the zone.
-
-* `masters` - (Optional) An array of master DNS servers. For when `type` is
- `SECONDARY`.
-
-* `value_specs` - (Optional) Map of additional options. Changing this creates a
- new zone.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `email` - See Argument Reference above.
-* `type` - See Argument Reference above.
-* `attributes` - See Argument Reference above.
-* `ttl` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `masters` - See Argument Reference above.
-* `value_specs` - See Argument Reference above.
-
-## Import
-
-This resource can be imported by specifying the zone ID:
-
-```
-$ terraform import openstack_dns_zone_v2.zone_1 <zone_id>
-```
diff --git a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown
deleted file mode 100644
index 0cd99be02..000000000
--- a/website/source/docs/providers/openstack/r/fw_firewall_v1.html.markdown
+++ /dev/null
@@ -1,103 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_fw_firewall_v1"
-sidebar_current: "docs-openstack-resource-fw-firewall-v1"
-description: |-
- Manages a v1 firewall resource within OpenStack.
----
-
-# openstack\_fw\_firewall_v1
-
-Manages a v1 firewall resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_fw_rule_v1" "rule_1" {
- name = "my-rule-1"
- description = "drop TELNET traffic"
- action = "deny"
- protocol = "tcp"
- destination_port = "23"
- enabled = "true"
-}
-
-resource "openstack_fw_rule_v1" "rule_2" {
- name = "my-rule-2"
- description = "drop NTP traffic"
- action = "deny"
- protocol = "udp"
- destination_port = "123"
- enabled = "false"
-}
-
-resource "openstack_fw_policy_v1" "policy_1" {
- name = "my-policy"
-
- rules = ["${openstack_fw_rule_v1.rule_1.id}",
- "${openstack_fw_rule_v1.rule_2.id}",
- ]
-}
-
-resource "openstack_fw_firewall_v1" "firewall_1" {
- name = "my-firewall"
- policy_id = "${openstack_fw_policy_v1.policy_1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the v1 networking client.
- A networking client is needed to create a firewall. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- firewall.
-
-* `policy_id` - (Required) The policy resource id for the firewall. Changing
- this updates the `policy_id` of an existing firewall.
-
-* `name` - (Optional) A name for the firewall. Changing this
- updates the `name` of an existing firewall.
-
-* `description` - (Required) A description for the firewall. Changing this
- updates the `description` of an existing firewall.
-
-* `admin_state_up` - (Optional) Administrative up/down status for the firewall
- (must be "true" or "false" if provided - defaults to "true").
- Changing this updates the `admin_state_up` of an existing firewall.
-
-* `tenant_id` - (Optional) The owner of the firewall. Required if admin wants
-  to create a firewall for another tenant. Changing this creates a new
-  firewall.
-
-* `associated_routers` - (Optional) Router(s) to associate this firewall instance
- with. Must be a list of strings. Changing this updates the associated routers
- of an existing firewall. Conflicts with `no_routers`.
-
-* `no_routers` - (Optional) Should this firewall not be associated with any routers
-  (must be "true" or "false" if provided - defaults to "false").
-  Conflicts with `associated_routers`.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `policy_id` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `associated_routers` - See Argument Reference above.
-* `no_routers` - See Argument Reference above.
-
-## Import
-
-Firewalls can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_fw_firewall_v1.firewall_1 c9e39fb2-ce20-46c8-a964-25f3898c7a97
-```
diff --git a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown
deleted file mode 100644
index e2eabf3a8..000000000
--- a/website/source/docs/providers/openstack/r/fw_policy_v1.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_fw_policy_v1"
-sidebar_current: "docs-openstack-resource-fw-policy-v1"
-description: |-
- Manages a v1 firewall policy resource within OpenStack.
----
-
-# openstack\_fw\_policy_v1
-
-Manages a v1 firewall policy resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_fw_rule_v1" "rule_1" {
- name = "my-rule-1"
- description = "drop TELNET traffic"
- action = "deny"
- protocol = "tcp"
- destination_port = "23"
- enabled = "true"
-}
-
-resource "openstack_fw_rule_v1" "rule_2" {
- name = "my-rule-2"
- description = "drop NTP traffic"
- action = "deny"
- protocol = "udp"
- destination_port = "123"
- enabled = "false"
-}
-
-resource "openstack_fw_policy_v1" "policy_1" {
- name = "my-policy"
-
- rules = ["${openstack_fw_rule_v1.rule_1.id}",
- "${openstack_fw_rule_v1.rule_2.id}",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the v1 networking client.
- A networking client is needed to create a firewall policy. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- firewall policy.
-
-* `name` - (Optional) A name for the firewall policy. Changing this
- updates the `name` of an existing firewall policy.
-
-* `description` - (Optional) A description for the firewall policy. Changing
- this updates the `description` of an existing firewall policy.
-
-* `rules` - (Optional) An array of one or more firewall rules that comprise
- the policy. Changing this results in adding/removing rules from the
- existing firewall policy.
-
-* `audited` - (Optional) Audit status of the firewall policy
- (must be "true" or "false" if provided - defaults to "false").
- This status is set to "false" whenever the firewall policy or any of its
- rules are changed. Changing this updates the `audited` status of an existing
- firewall policy.
-
-* `shared` - (Optional) Sharing status of the firewall policy (must be "true"
- or "false" if provided). If this is "true" the policy is visible to, and
- can be used in, firewalls in other tenants. Changing this updates the
- `shared` status of an existing firewall policy. Only administrative users
- can specify if the policy should be shared.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `audited` - See Argument Reference above.
-* `shared` - See Argument Reference above.
-
-## Import
-
-Firewall Policies can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_fw_policy_v1.policy_1 07f422e6-c596-474b-8b94-fe2c12506ce0
-```
diff --git a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown b/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown
deleted file mode 100644
index 69ea31a00..000000000
--- a/website/source/docs/providers/openstack/r/fw_rule_v1.html.markdown
+++ /dev/null
@@ -1,101 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_fw_rule_v1"
-sidebar_current: "docs-openstack-resource-fw-rule-v1"
-description: |-
- Manages a v1 firewall rule resource within OpenStack.
----
-
-# openstack\_fw\_rule_v1
-
-Manages a v1 firewall rule resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_fw_rule_v1" "rule_1" {
- name = "my_rule"
- description = "drop TELNET traffic"
- action = "deny"
- protocol = "tcp"
- destination_port = "23"
- enabled = "true"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the v1 Compute client.
- A Compute client is needed to create a firewall rule. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- firewall rule.
-
-* `name` - (Optional) A unique name for the firewall rule. Changing this
- updates the `name` of an existing firewall rule.
-
-* `description` - (Optional) A description for the firewall rule. Changing this
- updates the `description` of an existing firewall rule.
-
-* `protocol` - (Required) The protocol type on which the firewall rule operates.
- Valid values are: `tcp`, `udp`, `icmp`, and `any`. Changing this updates the
- `protocol` of an existing firewall rule.
-
-* `action` - (Required) Action to be taken (must be "allow" or "deny") when the
- firewall rule matches. Changing this updates the `action` of an existing
- firewall rule.
-
-* `ip_version` - (Optional) IP version, either 4 (default) or 6. Changing this
- updates the `ip_version` of an existing firewall rule.
-
-* `source_ip_address` - (Optional) The source IP address on which the firewall
- rule operates. Changing this updates the `source_ip_address` of an existing
- firewall rule.
-
-* `destination_ip_address` - (Optional) The destination IP address on which the
- firewall rule operates. Changing this updates the `destination_ip_address`
- of an existing firewall rule.
-
-* `source_port` - (Optional) The source port on which the firewall
- rule operates. Changing this updates the `source_port` of an existing
- firewall rule.
-
-* `destination_port` - (Optional) The destination port on which the firewall
- rule operates. Changing this updates the `destination_port` of an existing
- firewall rule.
-
-* `enabled` - (Optional) Enabled status for the firewall rule (must be "true"
- or "false" if provided - defaults to "true"). Changing this updates the
- `enabled` status of an existing firewall rule.
-
-* `tenant_id` - (Optional) The owner of the firewall rule. Required if admin
- wants to create a firewall rule for another tenant. Changing this creates a
- new firewall rule.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `protocol` - See Argument Reference above.
-* `action` - See Argument Reference above.
-* `ip_version` - See Argument Reference above.
-* `source_ip_address` - See Argument Reference above.
-* `destination_ip_address` - See Argument Reference above.
-* `source_port` - See Argument Reference above.
-* `destination_port` - See Argument Reference above.
-* `enabled` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-
-## Import
-
-Firewall Rules can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_fw_rule_v1.rule_1 8dbc0c28-e49c-463f-b712-5c5d1bbac327
-```
diff --git a/website/source/docs/providers/openstack/r/images_image_v2.html.markdown b/website/source/docs/providers/openstack/r/images_image_v2.html.markdown
deleted file mode 100644
index 91ec99463..000000000
--- a/website/source/docs/providers/openstack/r/images_image_v2.html.markdown
+++ /dev/null
@@ -1,109 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_images_image_v2"
-sidebar_current: "docs-openstack-resource-images-image-v2"
-description: |-
- Manages a V2 Image resource within OpenStack Glance.
----
-
-# openstack\_images\_image_v2
-
-Manages a V2 Image resource within OpenStack Glance.
-
-## Example Usage
-
-```hcl
-resource "openstack_images_image_v2" "rancheros" {
- name = "RancherOS"
- image_source_url = "https://releases.rancher.com/os/latest/rancheros-openstack.img"
- container_format = "bare"
- disk_format = "qcow2"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `container_format` - (Required) The container format. Must be one of
- "ami", "ari", "aki", "bare", "ovf".
-
-* `disk_format` - (Required) The disk format. Must be one of
- "ami", "ari", "aki", "vhd", "vmdk", "raw", "qcow2", "vdi", "iso".
-
-* `local_file_path` - (Optional) This is the filepath of the raw image file
- that will be uploaded to Glance. Conflicts with `image_source_url`.
-
-* `image_cache_path` - (Optional) This is the directory where the images will
- be downloaded. Images will be stored with a filename corresponding to
- the url's md5 hash. Defaults to "$HOME/.terraform/image_cache"
-
-* `image_source_url` - (Optional) This is the url of the raw image that will
- be downloaded in the `image_cache_path` before being uploaded to Glance.
- Glance is able to download image from internet but the `gophercloud` library
- does not yet provide a way to do so.
- Conflicts with `local_file_path`.
-
-* `min_disk_gb` - (Optional) Amount of disk space (in GB) required to boot image.
- Defaults to 0.
-
-* `min_ram_mb` - (Optional) Amount of ram (in MB) required to boot image.
- Defaults to 0.
-
-* `name` - (Required) The name of the image.
-
-* `protected` - (Optional) If true, image will not be deletable.
- Defaults to false.
-
-* `region` - (Required) The region in which to obtain the V2 Glance client.
- A Glance client is needed to create an Image that can be used with
- a compute instance. If omitted, the `OS_REGION_NAME` environment variable
- is used. Changing this creates a new Image.
-
-* `tags` - (Optional) The tags of the image. It must be a list of strings.
- At this time, it is not possible to delete all tags of an image.
-
-* `visibility` - (Optional) The visibility of the image. Must be one of
- "public", "private", "community", or "shared". The ability to set the
- visibility depends upon the configuration of the OpenStack cloud.
-
-Note: The `properties` attribute handling in the gophercloud library is currently buggy
-and needs to be fixed before being implemented in this resource.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `checksum` - The checksum of the data associated with the image.
-* `container_format` - See Argument Reference above.
-* `created_at` - The date the image was created.
-* `disk_format` - See Argument Reference above.
-* `file` - the trailing path after the glance
- endpoint that represent the location of the image
- or the path to retrieve it.
-* `id` - A unique ID assigned by Glance.
-* `metadata` - The metadata associated with the image.
- Image metadata allows for meaningfully defining the image properties
- and tags. See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
-* `min_disk_gb` - See Argument Reference above.
-* `min_ram_mb` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `owner` - The id of the openstack user who owns the image.
-* `protected` - See Argument Reference above.
-* `region` - See Argument Reference above.
-* `schema` - The path to the JSON-schema that represents
- the image or image properties.
-* `size_bytes` - The size in bytes of the data associated with the image.
-* `status` - The status of the image. It can be "queued", "active"
- or "saving".
-* `tags` - See Argument Reference above.
-* `update_at` - The date the image was last updated.
-* `visibility` - See Argument Reference above.
-
-## Import
-
-Images can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_images_image_v2.rancheros 89c60255-9bd6-460c-822a-e2b959ede9d2
-```
diff --git a/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown
deleted file mode 100644
index 66c3cc9c3..000000000
--- a/website/source/docs/providers/openstack/r/lb_listener_v2.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_listener_v2"
-sidebar_current: "docs-openstack-resource-lb-listener-v2"
-description: |-
- Manages a V2 listener resource within OpenStack.
----
-
-# openstack\_lb\_listener\_v2
-
-Manages a V2 listener resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_listener_v2" "listener_1" {
- protocol = "HTTP"
- protocol_port = 8080
- loadbalancer_id = "d9415786-5f1a-428b-b35f-2f1523e146d2"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a Listener. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- Listener.
-
-* `protocol` - (Required) The protocol - can either be TCP, HTTP or HTTPS.
- Changing this creates a new Listener.
-
-* `protocol_port` - (Required) The port on which to listen for client traffic.
- Changing this creates a new Listener.
-
-* `tenant_id` - (Optional) Required for admins. The UUID of the tenant who owns
- the Listener. Only administrative users can specify a tenant UUID
- other than their own. Changing this creates a new Listener.
-
-* `loadbalancer_id` - (Required) The load balancer on which to provision this
- Listener. Changing this creates a new Listener.
-
-* `name` - (Optional) Human-readable name for the Listener. Does not have
- to be unique.
-
-* `default_pool_id` - (Optional) The ID of the default pool with which the
- Listener is associated. Changing this creates a new Listener.
-
-* `description` - (Optional) Human-readable description for the Listener.
-
-* `connection_limit` - (Optional) The maximum number of connections allowed
- for the Listener.
-
-* `default_tls_container_ref` - (Optional) A reference to a container of TLS
- secrets.
-
-* `sni_container_refs` - (Optional) A list of references to TLS secrets.
-
-* `admin_state_up` - (Optional) The administrative state of the Listener.
- A valid value is true (UP) or false (DOWN).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID for the Listener.
-* `protocol` - See Argument Reference above.
-* `protocol_port` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `default_pool_id` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `connection_limit` - See Argument Reference above.
-* `default_tls_container_ref` - See Argument Reference above.
-* `sni_container_refs` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown
deleted file mode 100644
index e8ee33e9c..000000000
--- a/website/source/docs/providers/openstack/r/lb_loadbalancer_v2.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_loadbalancer_v2"
-sidebar_current: "docs-openstack-resource-lb-loadbalancer-v2"
-description: |-
- Manages a V2 loadbalancer resource within OpenStack.
----
-
-# openstack\_lb\_loadbalancer\_v2
-
-Manages a V2 loadbalancer resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_loadbalancer_v2" "lb_1" {
- vip_subnet_id = "d9415786-5f1a-428b-b35f-2f1523e146d2"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create an LB member. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- LB member.
-
-* `vip_subnet_id` - (Required) The network on which to allocate the
- Loadbalancer's address. A tenant can only create Loadbalancers on networks
- authorized by policy (e.g. networks that belong to them or networks that
- are shared). Changing this creates a new loadbalancer.
-
-* `name` - (Optional) Human-readable name for the Loadbalancer. Does not have
- to be unique.
-
-* `description` - (Optional) Human-readable description for the Loadbalancer.
-
-* `tenant_id` - (Optional) Required for admins. The UUID of the tenant who owns
- the Loadbalancer. Only administrative users can specify a tenant UUID
- other than their own. Changing this creates a new loadbalancer.
-
-* `vip_address` - (Optional) The ip address of the load balancer.
- Changing this creates a new loadbalancer.
-
-* `admin_state_up` - (Optional) The administrative state of the Loadbalancer.
- A valid value is true (UP) or false (DOWN).
-
-* `flavor` - (Optional) The UUID of a flavor. Changing this creates a new
- loadbalancer.
-
-* `provider` - (Deprecated) Use `loadbalancer_provider` instead.
-
-* `loadbalancer_provider` - (Optional) The name of the provider. Changing this
- creates a new loadbalancer.
-
-* `security_group_ids` - (Optional) A list of security group IDs to apply to the
- loadbalancer. The security groups must be specified by ID and not name (as
- opposed to how they are configured with the Compute Instance).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `vip_subnet_id` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `vip_address` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `flavor` - See Argument Reference above.
-* `loadbalancer_provider` - See Argument Reference above.
-* `security_group_ids` - See Argument Reference above.
-* `vip_port_id` - The Port ID of the Load Balancer IP.
diff --git a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown
deleted file mode 100644
index a6bf9d544..000000000
--- a/website/source/docs/providers/openstack/r/lb_member_v1.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_member_v1"
-sidebar_current: "docs-openstack-resource-lb-member-v1"
-description: |-
- Manages a V1 load balancer member resource within OpenStack.
----
-
-# openstack\_lb\_member_v1
-
-Manages a V1 load balancer member resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_member_v1" "member_1" {
- pool_id = "d9415786-5f1a-428b-b35f-2f1523e146d2"
- address = "192.168.0.10"
- port = 80
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create an LB member. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- LB member.
-
-* `pool_id` - (Required) The ID of the LB pool. Changing this creates a new
- member.
-
-* `address` - (Required) The IP address of the member. Changing this creates a
- new member.
-
-* `port` - (Required) An integer representing the port on which the member is
- hosted. Changing this creates a new member.
-
-* `admin_state_up` - (Optional) The administrative state of the member.
- Acceptable values are 'true' and 'false'. Changing this value updates the
- state of the existing member.
-
-* `tenant_id` - (Optional) The owner of the member. Required if admin wants to
- create a member for another tenant. Changing this creates a new member.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `pool_id` - See Argument Reference above.
-* `address` - See Argument Reference above.
-* `port` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `weight` - The load balancing weight of the member. This is currently unable
- to be set through Terraform.
-
-## Import
-
-Load Balancer Members can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_lb_member_v1.member_1 a7498676-4fe4-4243-a864-2eaaf18c73df
-```
diff --git a/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown
deleted file mode 100644
index 9f26c070e..000000000
--- a/website/source/docs/providers/openstack/r/lb_member_v2.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_member_v2"
-sidebar_current: "docs-openstack-resource-lb-member-v2"
-description: |-
- Manages a V2 member resource within OpenStack.
----
-
-# openstack\_lb\_member\_v2
-
-Manages a V2 member resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_member_v2" "member_1" {
- address = "192.168.199.23"
- protocol_port = 8080
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a member. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- member.
-
-* `pool_id` - (Required) The id of the pool that this member will be
- assigned to.
-
-* `subnet_id` - (Required) The subnet in which to access the member
-
-* `name` - (Optional) Human-readable name for the member.
-
-* `tenant_id` - (Optional) Required for admins. The UUID of the tenant who owns
- the member. Only administrative users can specify a tenant UUID
- other than their own. Changing this creates a new member.
-
-* `address` - (Required) The IP address of the member to receive traffic from
- the load balancer. Changing this creates a new member.
-
-* `protocol_port` - (Required) The port on which to listen for client traffic.
- Changing this creates a new member.
-
-* `weight` - (Optional) A positive integer value that indicates the relative
- portion of traffic that this member should receive from the pool. For
- example, a member with a weight of 10 receives five times as much traffic
- as a member with a weight of 2.
-
-* `admin_state_up` - (Optional) The administrative state of the member.
- A valid value is true (UP) or false (DOWN).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID for the member.
-* `name` - See Argument Reference above.
-* `weight` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `subnet_id` - See Argument Reference above.
-* `pool_id` - See Argument Reference above.
-* `address` - See Argument Reference above.
-* `protocol_port` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown
deleted file mode 100644
index a03f748ed..000000000
--- a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_monitor_v1"
-sidebar_current: "docs-openstack-resource-lb-monitor-v1"
-description: |-
- Manages a V1 load balancer monitor resource within OpenStack.
----
-
-# openstack\_lb\_monitor_v1
-
-Manages a V1 load balancer monitor resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_monitor_v1" "monitor_1" {
- type = "PING"
- delay = 30
- timeout = 5
- max_retries = 3
- admin_state_up = "true"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create an LB monitor. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- LB monitor.
-
-* `type` - (Required) The type of probe, which is PING, TCP, HTTP, or HTTPS,
- that is sent by the monitor to verify the member state. Changing this
- creates a new monitor.
-
-* `delay` - (Required) The time, in seconds, between sending probes to members.
- Changing this creates a new monitor.
-
-* `timeout` - (Required) Maximum number of seconds for a monitor to wait for a
- ping reply before it times out. The value must be less than the delay value.
- Changing this updates the timeout of the existing monitor.
-
-* `max_retries` - (Required) Number of permissible ping failures before changing
- the member's status to INACTIVE. Must be a number between 1 and 10. Changing
- this updates the max_retries of the existing monitor.
-
-* `url_path` - (Optional) Required for HTTP(S) types. URI path that will be
- accessed if monitor type is HTTP or HTTPS. Changing this updates the
- url_path of the existing monitor.
-
-* `http_method` - (Optional) Required for HTTP(S) types. The HTTP method used
- for requests by the monitor. If this attribute is not specified, it defaults
- to "GET". Changing this updates the http_method of the existing monitor.
-
-* `expected_codes` - (Optional) Required for HTTP(S) types. Expected HTTP codes
- for a passing HTTP(S) monitor. You can either specify a single status like
- "200", or a range like "200-202". Changing this updates the expected_codes
- of the existing monitor.
-
-* `admin_state_up` - (Optional) The administrative state of the monitor.
- Acceptable values are "true" and "false". Changing this value updates the
- state of the existing monitor.
-
-* `tenant_id` - (Optional) The owner of the monitor. Required if admin wants to
- create a monitor for another tenant. Changing this creates a new monitor.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `type` - See Argument Reference above.
-* `delay` - See Argument Reference above.
-* `timeout` - See Argument Reference above.
-* `max_retries` - See Argument Reference above.
-* `url_path` - See Argument Reference above.
-* `http_method` - See Argument Reference above.
-* `expected_codes` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-
-## Import
-
-Load Balancer Monitors can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_lb_monitor_v1.monitor_1 119d7530-72e9-449a-aa97-124a5ef1992c
-```
diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown
deleted file mode 100644
index 73e49eba3..000000000
--- a/website/source/docs/providers/openstack/r/lb_monitor_v2.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_monitor_v2"
-sidebar_current: "docs-openstack-resource-lb-monitor-v2"
-description: |-
- Manages a V2 monitor resource within OpenStack.
----
-
-# openstack\_lb\_monitor\_v2
-
-Manages a V2 monitor resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_monitor_v2" "monitor_1" {
- pool_id = "${openstack_lb_pool_v2.pool_1.id}"
- type = "PING"
- delay = 20
- timeout = 10
- max_retries = 5
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a monitor. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- monitor.
-
-* `pool_id` - (Required) The id of the pool that this monitor will be assigned to.
-
-* `name` - (Optional) The Name of the Monitor.
-
-* `tenant_id` - (Optional) Required for admins. The UUID of the tenant who owns
- the monitor. Only administrative users can specify a tenant UUID
- other than their own. Changing this creates a new monitor.
-
-* `type` - (Required) The type of probe, which is PING, TCP, HTTP, or HTTPS,
- that is sent by the load balancer to verify the member state. Changing this
- creates a new monitor.
-
-* `delay` - (Required) The time, in seconds, between sending probes to members.
-
-* `timeout` - (Required) Maximum number of seconds for a monitor to wait for a
- ping reply before it times out. The value must be less than the delay
- value.
-
-* `max_retries` - (Required) Number of permissible ping failures before
- changing the member's status to INACTIVE. Must be a number between 1
- and 10.
-
-* `url_path` - (Optional) Required for HTTP(S) types. URI path that will be
- accessed if monitor type is HTTP or HTTPS.
-
-* `http_method` - (Optional) Required for HTTP(S) types. The HTTP method used
- for requests by the monitor. If this attribute is not specified, it
- defaults to "GET".
-
-* `expected_codes` - (Optional) Required for HTTP(S) types. Expected HTTP codes
- for a passing HTTP(S) monitor. You can either specify a single status like
- "200", or a range like "200-202".
-
-* `admin_state_up` - (Optional) The administrative state of the monitor.
- A valid value is true (UP) or false (DOWN).
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID for the monitor.
-* `tenant_id` - See Argument Reference above.
-* `type` - See Argument Reference above.
-* `delay` - See Argument Reference above.
-* `timeout` - See Argument Reference above.
-* `max_retries` - See Argument Reference above.
-* `url_path` - See Argument Reference above.
-* `http_method` - See Argument Reference above.
-* `expected_codes` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown
deleted file mode 100644
index 54afa16d3..000000000
--- a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown
+++ /dev/null
@@ -1,190 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_pool_v1"
-sidebar_current: "docs-openstack-resource-lb-pool-v1"
-description: |-
- Manages a V1 load balancer pool resource within OpenStack.
----
-
-# openstack\_lb\_pool_v1
-
-Manages a V1 load balancer pool resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_pool_v1" "pool_1" {
- name = "tf_test_lb_pool"
- protocol = "HTTP"
- subnet_id = "12345"
- lb_method = "ROUND_ROBIN"
- lb_provider = "haproxy"
- monitor_ids = ["67890"]
-}
-```
-
-## Complete Load Balancing Stack Example
-
-```
-resource "openstack_networking_network_v2" "network_1" {
- name = "network_1"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "subnet_1" {
- network_id = "${openstack_networking_network_v2.network_1.id}"
- cidr = "192.168.199.0/24"
- ip_version = 4
-}
-
-resource "openstack_compute_secgroup_v2" "secgroup_1" {
- name = "secgroup_1"
- description = "Rules for secgroup_1"
-
- rule {
- from_port = -1
- to_port = -1
- ip_protocol = "icmp"
- cidr = "0.0.0.0/0"
- }
-
- rule {
- from_port = 80
- to_port = 80
- ip_protocol = "tcp"
- cidr = "0.0.0.0/0"
- }
-}
-
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
-
- network {
- uuid = "${openstack_networking_network_v2.network_1.id}"
- }
-}
-
-resource "openstack_compute_instance_v2" "instance_2" {
- name = "instance_2"
- security_groups = ["default", "${openstack_compute_secgroup_v2.secgroup_1.name}"]
-
- network {
- uuid = "${openstack_networking_network_v2.network_1.id}"
- }
-}
-
-resource "openstack_lb_monitor_v1" "monitor_1" {
- type = "TCP"
- delay = 30
- timeout = 5
- max_retries = 3
- admin_state_up = "true"
-}
-
-resource "openstack_lb_pool_v1" "pool_1" {
- name = "pool_1"
- protocol = "TCP"
- subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
- lb_method = "ROUND_ROBIN"
- monitor_ids = ["${openstack_lb_monitor_v1.monitor_1.id}"]
-}
-
-resource "openstack_lb_member_v1" "member_1" {
- pool_id = "${openstack_lb_pool_v1.pool_1.id}"
- address = "${openstack_compute_instance_v2.instance_1.access_ip_v4}"
- port = 80
-}
-
-resource "openstack_lb_member_v1" "member_2" {
- pool_id = "${openstack_lb_pool_v1.pool_1.id}"
- address = "${openstack_compute_instance_v2.instance_2.access_ip_v4}"
- port = 80
-}
-
-resource "openstack_lb_vip_v1" "vip_1" {
- name = "vip_1"
- subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
- protocol = "TCP"
- port = 80
- pool_id = "${openstack_lb_pool_v1.pool_1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create an LB pool. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- LB pool.
-
-* `name` - (Required) The name of the pool. Changing this updates the name of
- the existing pool.
-
-* `protocol` - (Required) The protocol used by the pool members, you can use
- either 'TCP, 'HTTP', or 'HTTPS'. Changing this creates a new pool.
-
-* `subnet_id` - (Required) The network on which the members of the pool will be
- located. Only members that are on this network can be added to the pool.
- Changing this creates a new pool.
-
-* `lb_method` - (Required) The algorithm used to distribute load between the
- members of the pool. The current specification supports 'ROUND_ROBIN' and
- 'LEAST_CONNECTIONS' as valid values for this attribute.
-
-* `lb_provider` - (Optional) The backend load balancing provider. For example:
- `haproxy`, `F5`, etc.
-
-* `tenant_id` - (Optional) The owner of the pool. Required if admin wants to
- create a pool member for another tenant. Changing this creates a new pool.
-
-* `monitor_ids` - (Optional) A list of IDs of monitors to associate with the
- pool.
-
-* `member` - (Optional) An existing node to add to the pool. Changing this
- updates the members of the pool. The member object structure is documented
- below. Please note that the `member` block is deprecated in favor of the
- `openstack_lb_member_v1` resource.
-
-The `member` block supports:
-
-* `address` - (Required) The IP address of the member. Changing this creates a
-new member.
-
-* `port` - (Required) An integer representing the port on which the member is
-hosted. Changing this creates a new member.
-
-* `admin_state_up` - (Required) The administrative state of the member.
-Acceptable values are 'true' and 'false'. Changing this value updates the
-state of the existing member.
-
-* `tenant_id` - (Optional) The owner of the member. Required if admin wants to
-create a pool member for another tenant. Changing this creates a new member.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `protocol` - See Argument Reference above.
-* `subnet_id` - See Argument Reference above.
-* `lb_method` - See Argument Reference above.
-* `lb_provider` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `monitor_ids` - See Argument Reference above.
-* `member` - See Argument Reference above.
-
-## Notes
-
-The `member` block is deprecated in favor of the `openstack_lb_member_v1` resource.
-
-## Import
-
-Load Balancer Pools can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_lb_pool_v1.pool_1 b255e6ba-02ad-43e6-8951-3428ca26b713
-```
diff --git a/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown
deleted file mode 100644
index f1ea4a721..000000000
--- a/website/source/docs/providers/openstack/r/lb_pool_v2.html.markdown
+++ /dev/null
@@ -1,86 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_pool_v2"
-sidebar_current: "docs-openstack-resource-lb-pool-v2"
-description: |-
- Manages a V2 pool resource within OpenStack.
----
-
-# openstack\_lb\_pool\_v2
-
-Manages a V2 pool resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_pool_v2" "pool_1" {
- protocol = "HTTP"
- lb_method = "ROUND_ROBIN"
- listener_id = "d9415786-5f1a-428b-b35f-2f1523e146d2"
-
- persistence {
- type = "HTTP_COOKIE"
- cookie_name = "testCookie"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a pool. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- pool.
-
-* `tenant_id` - (Optional) Required for admins. The UUID of the tenant who owns
- the pool. Only administrative users can specify a tenant UUID
- other than their own. Changing this creates a new pool.
-
-* `name` - (Optional) Human-readable name for the pool.
-
-* `description` - (Optional) Human-readable description for the pool.
-
-* `protocol` - (Required) The protocol - can either be TCP, HTTP or HTTPS.
- Changing this creates a new pool.
-
-* `loadbalancer_id` - (Optional) The load balancer on which to provision this
- pool. Changing this creates a new pool.
- Note: One of LoadbalancerID or ListenerID must be provided.
-
-* `listener_id` - (Optional) The Listener on which the members of the pool
- will be associated with. Changing this creates a new pool.
- Note: One of LoadbalancerID or ListenerID must be provided.
-
-* `lb_method` - (Required) The load balancing algorithm to
- distribute traffic to the pool's members. Must be one of
- ROUND_ROBIN, LEAST_CONNECTIONS, or SOURCE_IP.
-
-* `persistence` - Omit this field to prevent session persistence. Indicates
- whether connections in the same session will be processed by the same Pool
- member or not. Changing this creates a new pool.
-
-* `admin_state_up` - (Optional) The administrative state of the pool.
- A valid value is true (UP) or false (DOWN).
-
-The `persistence` argument supports:
-
-* `type` - (Required) The type of persistence mode. The current specification
- supports SOURCE_IP, HTTP_COOKIE, and APP_COOKIE.
-
-* `cookie_name` - (Required) The name of the cookie if persistence mode is set
- appropriately.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID for the pool.
-* `tenant_id` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `protocol` - See Argument Reference above.
-* `lb_method` - See Argument Reference above.
-* `persistence` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown
deleted file mode 100644
index 403354a99..000000000
--- a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown
+++ /dev/null
@@ -1,108 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_lb_vip_v1"
-sidebar_current: "docs-openstack-resource-lb-vip-v1"
-description: |-
- Manages a V1 load balancer vip resource within OpenStack.
----
-
-# openstack\_lb\_vip_v1
-
-Manages a V1 load balancer vip resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_lb_vip_v1" "vip_1" {
- name = "tf_test_lb_vip"
- subnet_id = "12345"
- protocol = "HTTP"
- port = 80
- pool_id = "67890"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a VIP. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- VIP.
-
-* `name` - (Required) The name of the vip. Changing this updates the name of
- the existing vip.
-
-* `subnet_id` - (Required) The network on which to allocate the vip's address. A
- tenant can only create vips on networks authorized by policy (e.g. networks
- that belong to them or networks that are shared). Changing this creates a
- new vip.
-
-* `protocol` - (Required) The protocol - can be either 'TCP', 'HTTP', or
-  'HTTPS'. Changing this creates a new vip.
-
-* `port` - (Required) The port on which to listen for client traffic. Changing
- this creates a new vip.
-
-* `pool_id` - (Required) The ID of the pool with which the vip is associated.
- Changing this updates the pool_id of the existing vip.
-
-* `tenant_id` - (Optional) The owner of the vip. Required if admin wants to
- create a vip member for another tenant. Changing this creates a new vip.
-
-* `address` - (Optional) The IP address of the vip. Changing this creates a new
- vip.
-
-* `description` - (Optional) Human-readable description for the vip. Changing
- this updates the description of the existing vip.
-
-* `persistence` - (Optional) Omit this field to prevent session persistence.
- The persistence object structure is documented below. Changing this updates
- the persistence of the existing vip.
-
-* `conn_limit` - (Optional) The maximum number of connections allowed for the
- vip. Default is -1, meaning no limit. Changing this updates the conn_limit
- of the existing vip.
-
-* `floating_ip` - (Optional) A *Networking* Floating IP that will be associated
- with the vip. The Floating IP must be provisioned already.
-
-* `admin_state_up` - (Optional) The administrative state of the vip.
- Acceptable values are "true" and "false". Changing this value updates the
- state of the existing vip.
-
-The `persistence` block supports:
-
-* `type` - (Required) The type of persistence mode. Valid values are "SOURCE_IP",
- "HTTP_COOKIE", or "APP_COOKIE".
-
-* `cookie_name` - (Optional) The name of the cookie if persistence mode is set
- appropriately.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `subnet_id` - See Argument Reference above.
-* `protocol` - See Argument Reference above.
-* `port` - See Argument Reference above.
-* `pool_id` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `address` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `persistence` - See Argument Reference above.
-* `conn_limit` - See Argument Reference above.
-* `floating_ip` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `port_id` - Port UUID for this VIP at associated floating IP (if any).
-
-## Import
-
-Load Balancer VIPs can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_lb_vip_v1.vip_1 50e16b26-89c1-475e-a492-76167182511e
-```
diff --git a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown
deleted file mode 100644
index a59f9d341..000000000
--- a/website/source/docs/providers/openstack/r/networking_floatingip_v2.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_floatingip_v2"
-sidebar_current: "docs-openstack-resource-networking-floatingip-v2"
-description: |-
- Manages a V2 floating IP resource within OpenStack Neutron (networking).
----
-
-# openstack\_networking\_floatingip_v2
-
-Manages a V2 floating IP resource within OpenStack Neutron (networking)
-that can be used for load balancers.
-These are similar to Nova (compute) floating IP resources,
-but only compute floating IPs can be used with compute instances.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_floatingip_v2" "floatip_1" {
- pool = "public"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a floating IP that can be used with
- another networking resource, such as a load balancer. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- floating IP (which may or may not have a different address).
-
-* `pool` - (Required) The name of the pool from which to obtain the floating
- IP. Changing this creates a new floating IP.
-
-* `port_id` - (Optional) ID of an existing port with at least one IP address to
- associate with this floating IP.
-
-* `tenant_id` - (Optional) The target tenant ID in which to allocate the floating
- IP, if you specify this together with a port_id, make sure the target port
- belongs to the same tenant. Changing this creates a new floating IP (which
- may or may not have a different address)
-
-* `fixed_ip` - Fixed IP of the port to associate with this floating IP. Required if
-the port has multiple fixed IPs.
-
-* `value_specs` - (Optional) Map of additional options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `pool` - See Argument Reference above.
-* `address` - The actual floating IP address itself.
-* `port_id` - ID of associated port.
-* `tenant_id` - the ID of the tenant in which to create the floating IP.
-* `fixed_ip` - The fixed IP which the floating IP maps to.
-
-## Import
-
-Floating IPs can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_floatingip_v2.floatip_1 2c7f39f3-702b-48d1-940c-b50384177ee1
-```
diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown
deleted file mode 100644
index e14b5f28f..000000000
--- a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown
+++ /dev/null
@@ -1,111 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_network_v2"
-sidebar_current: "docs-openstack-resource-networking-network-v2"
-description: |-
- Manages a V2 Neutron network resource within OpenStack.
----
-
-# openstack\_networking\_network_v2
-
-Manages a V2 Neutron network resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_network_v2" "network_1" {
- name = "network_1"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "subnet_1" {
- name = "subnet_1"
- network_id = "${openstack_networking_network_v2.network_1.id}"
- cidr = "192.168.199.0/24"
- ip_version = 4
-}
-
-resource "openstack_compute_secgroup_v2" "secgroup_1" {
- name = "secgroup_1"
- description = "a security group"
-
- rule {
- from_port = 22
- to_port = 22
- ip_protocol = "tcp"
- cidr = "0.0.0.0/0"
- }
-}
-
-resource "openstack_networking_port_v2" "port_1" {
- name = "port_1"
- network_id = "${openstack_networking_network_v2.network_1.id}"
- admin_state_up = "true"
- security_group_ids = ["${openstack_compute_secgroup_v2.secgroup_1.id}"]
-
- fixed_ip {
- "subnet_id" = "${openstack_networking_subnet_v2.subnet_1.id}"
- "ip_address" = "192.168.199.10"
- }
-}
-
-resource "openstack_compute_instance_v2" "instance_1" {
- name = "instance_1"
- security_groups = ["${openstack_compute_secgroup_v2.secgroup_1.name}"]
-
- network {
- port = "${openstack_networking_port_v2.port_1.id}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a Neutron network. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- network.
-
-* `name` - (Optional) The name of the network. Changing this updates the name of
- the existing network.
-
-* `shared` - (Optional) Specifies whether the network resource can be accessed
-  by any tenant or not. Changing this updates the sharing capabilities of the
- existing network.
-
-* `tenant_id` - (Optional) The owner of the network. Required if admin wants to
- create a network for another tenant. Changing this creates a new network.
-
-* `admin_state_up` - (Optional) The administrative state of the network.
- Acceptable values are "true" and "false". Changing this value updates the
- state of the existing network.
-
-* `segments` - (Optional) An array of one or more provider segment objects.
-
-* `value_specs` - (Optional) Map of additional options.
-
-The `segments` block supports:
-
-* `physical_network` - The physical network where this network is implemented.
-* `segmentation_id` - An isolated segment on the physical network.
-* `network_type` - The type of physical network.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `shared` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-
-## Import
-
-Networks can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_network_v2.network_1 d90ce693-5ccf-4136-a0ed-152ce412b6b9
-```
diff --git a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown
deleted file mode 100644
index 5cbef5478..000000000
--- a/website/source/docs/providers/openstack/r/networking_port_v2.html.markdown
+++ /dev/null
@@ -1,116 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_port_v2"
-sidebar_current: "docs-openstack-resource-networking-port-v2"
-description: |-
- Manages a V2 port resource within OpenStack.
----
-
-# openstack\_networking\_port_v2
-
-Manages a V2 port resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_network_v2" "network_1" {
- name = "network_1"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_port_v2" "port_1" {
- name = "port_1"
- network_id = "${openstack_networking_network_v2.network_1.id}"
- admin_state_up = "true"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to create a port. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- port.
-
-* `name` - (Optional) A unique name for the port. Changing this
- updates the `name` of an existing port.
-
-* `network_id` - (Required) The ID of the network to attach the port to. Changing
- this creates a new port.
-
-* `admin_state_up` - (Optional) Administrative up/down status for the port
- (must be "true" or "false" if provided). Changing this updates the
- `admin_state_up` of an existing port.
-
-* `mac_address` - (Optional) Specify a specific MAC address for the port. Changing
- this creates a new port.
-
-* `tenant_id` - (Optional) The owner of the Port. Required if admin wants
- to create a port for another tenant. Changing this creates a new port.
-
-* `device_owner` - (Optional) The device owner of the Port. Changing this creates
- a new port.
-
-* `security_group_ids` - (Optional) A list of security group IDs to apply to the
- port. The security groups must be specified by ID and not name (as opposed
- to how they are configured with the Compute Instance).
-
-* `device_id` - (Optional) The ID of the device attached to the port. Changing this
- creates a new port.
-
-* `fixed_ip` - (Optional) An array of desired IPs for this port. The structure is
- described below.
-
-* `allowed_address_pairs` - (Optional) An IP/MAC Address pair of additional IP
- addresses that can be active on this port. The structure is described
- below.
-
-* `value_specs` - (Optional) Map of additional options.
-
-The `fixed_ip` block supports:
-
-* `subnet_id` - (Required) Subnet in which to allocate IP address for
-this port.
-
-* `ip_address` - (Optional) IP address desired in the subnet for this port. If
-you don't specify `ip_address`, an available IP address from the specified
-subnet will be allocated to this port.
-
-The `allowed_address_pairs` block supports:
-
-* `ip_address` - (Required) The additional IP address.
-
-* `mac_address` - (Optional) The additional MAC address.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `mac_address` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `device_owner` - See Argument Reference above.
-* `security_group_ids` - See Argument Reference above.
-* `device_id` - See Argument Reference above.
-* `fixed_ip` - See Argument Reference above.
-* `all_fixed_ips` - The collection of Fixed IP addresses on the port in the
- order returned by the Network v2 API.
-
-## Import
-
-Ports can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_port_v2.port_1 eae26a3e-1c33-4cc1-9c31-0cd729c438a1
-```
-
-## Notes
-
-### Ports and Instances
-
-There are some notes to consider when connecting Instances to networks using
-Ports. Please see the `openstack_compute_instance_v2` documentation for further
-documentation.
diff --git a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown
deleted file mode 100644
index 610c04ed1..000000000
--- a/website/source/docs/providers/openstack/r/networking_router_interface_v2.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_router_interface_v2"
-sidebar_current: "docs-openstack-resource-networking-router-interface-v2"
-description: |-
- Manages a V2 router interface resource within OpenStack.
----
-
-# openstack\_networking\_router_interface_v2
-
-Manages a V2 router interface resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_network_v2" "network_1" {
- name = "tf_test_network"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "subnet_1" {
- network_id = "${openstack_networking_network_v2.network_1.id}"
- cidr = "192.168.199.0/24"
- ip_version = 4
-}
-
-resource "openstack_networking_router_v2" "router_1" {
- name = "my_router"
- external_gateway = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f"
-}
-
-resource "openstack_networking_router_interface_v2" "router_interface_1" {
- router_id = "${openstack_networking_router_v2.router_1.id}"
- subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to create a router. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- router interface.
-
-* `router_id` - (Required) ID of the router this interface belongs to. Changing
- this creates a new router interface.
-
-* `subnet_id` - ID of the subnet this interface connects to. Changing
- this creates a new router interface.
-
-* `port_id` - ID of the port this interface connects to. Changing
- this creates a new router interface.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `router_id` - See Argument Reference above.
-* `subnet_id` - See Argument Reference above.
-* `port_id` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown
deleted file mode 100644
index fac11c0f1..000000000
--- a/website/source/docs/providers/openstack/r/networking_router_route_v2.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_router_route_v2"
-sidebar_current: "docs-openstack-resource-networking-router-route-v2"
-description: |-
- Creates a routing entry on an OpenStack V2 router.
----
-
-# openstack\_networking\_router_route_v2
-
-Creates a routing entry on an OpenStack V2 router.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_router_v2" "router_1" {
- name = "router_1"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_network_v2" "network_1" {
- name = "network_1"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "subnet_1" {
- network_id = "${openstack_networking_network_v2.network_1.id}"
- cidr = "192.168.199.0/24"
- ip_version = 4
-}
-
-resource "openstack_networking_router_interface_v2" "int_1" {
- router_id = "${openstack_networking_router_v2.router_1.id}"
- subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}"
-}
-
-resource "openstack_networking_router_route_v2" "router_route_1" {
- depends_on = ["openstack_networking_router_interface_v2.int_1"]
- router_id = "${openstack_networking_router_v2.router_1.id}"
- destination_cidr = "10.0.1.0/24"
- next_hop = "192.168.199.254"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to configure a routing entry on a router. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- routing entry.
-
-* `router_id` - (Required) ID of the router this routing entry belongs to. Changing
- this creates a new routing entry.
-
-* `destination_cidr` - (Required) CIDR block to match on the packet’s destination IP. Changing
- this creates a new routing entry.
-
-* `next_hop` - (Required) IP address of the next hop gateway. Changing
- this creates a new routing entry.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `router_id` - See Argument Reference above.
-* `destination_cidr` - See Argument Reference above.
-* `next_hop` - See Argument Reference above.
-
-## Notes
-
-The `next_hop` IP address must be directly reachable from the router at the ``openstack_networking_router_route_v2``
-resource creation time. You can ensure that by explicitly specifying a dependency on the ``openstack_networking_router_interface_v2``
-resource that connects the next hop to the router, as in the example above.
diff --git a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown
deleted file mode 100644
index be760746f..000000000
--- a/website/source/docs/providers/openstack/r/networking_router_v2.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_router_v2"
-sidebar_current: "docs-openstack-resource-networking-router-v2"
-description: |-
- Manages a V2 router resource within OpenStack.
----
-
-# openstack\_networking\_router_v2
-
-Manages a V2 router resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_router_v2" "router_1" {
- name = "my_router"
- external_gateway = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to create a router. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- router.
-
-* `name` - (Optional) A unique name for the router. Changing this
- updates the `name` of an existing router.
-
-* `admin_state_up` - (Optional) Administrative up/down status for the router
- (must be "true" or "false" if provided). Changing this updates the
- `admin_state_up` of an existing router.
-
-* `distributed` - (Optional) Indicates whether or not to create a
- distributed router. The default policy setting in Neutron restricts
- usage of this property to administrative users only.
-
-* `external_gateway` - (Optional) The network UUID of an external gateway for
- the router. A router with an external gateway is required if any compute
- instances or load balancers will be using floating IPs. Changing this
- updates the `external_gateway` of an existing router.
-
-* `tenant_id` - (Optional) The owner of the floating IP. Required if admin wants
- to create a router for another tenant. Changing this creates a new router.
-
-* `value_specs` - (Optional) Map of additional driver-specific options.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - ID of the router.
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `admin_state_up` - See Argument Reference above.
-* `external_gateway` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `value_specs` - See Argument Reference above.
diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown
deleted file mode 100644
index a0c181493..000000000
--- a/website/source/docs/providers/openstack/r/networking_secgroup_rule_v2.html.markdown
+++ /dev/null
@@ -1,116 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_secgroup_rule_v2"
-sidebar_current: "docs-openstack-resource-networking-secgroup-rule-v2"
-description: |-
- Manages a V2 Neutron security group rule resource within OpenStack.
----
-
-# openstack\_networking\_secgroup\_rule_v2
-
-Manages a V2 neutron security group rule resource within OpenStack.
-Unlike Nova security groups, neutron separates the group from the rules
-and also allows an admin to target a specific tenant_id.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_secgroup_v2" "secgroup_1" {
- name = "secgroup_1"
- description = "My neutron security group"
-}
-
-resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_1" {
- direction = "ingress"
- ethertype = "IPv4"
- protocol = "tcp"
- port_range_min = 22
- port_range_max = 22
- remote_ip_prefix = "0.0.0.0/0"
- security_group_id = "${openstack_networking_secgroup_v2.secgroup_1.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to create a port. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- security group rule.
-
-* `direction` - (Required) The direction of the rule, valid values are __ingress__
- or __egress__. Changing this creates a new security group rule.
-
-* `ethertype` - (Required) The layer 3 protocol type, valid values are __IPv4__
- or __IPv6__. Changing this creates a new security group rule.
-
-* `protocol` - (Optional) The layer 4 protocol type, valid values are following. Changing this creates a new security group rule. This is required if you want to specify a port range.
- * __tcp__
- * __udp__
- * __icmp__
- * __ah__
- * __dccp__
- * __egp__
- * __esp__
- * __gre__
- * __igmp__
- * __ipv6-encap__
- * __ipv6-frag__
- * __ipv6-icmp__
- * __ipv6-nonxt__
- * __ipv6-opts__
- * __ipv6-route__
- * __ospf__
- * __pgm__
- * __rsvp__
- * __sctp__
- * __udplite__
- * __vrrp__
-
-* `port_range_min` - (Optional) The lower part of the allowed port range, valid
- integer value needs to be between 1 and 65535. Changing this creates a new
- security group rule.
-
-* `port_range_max` - (Optional) The higher part of the allowed port range, valid
- integer value needs to be between 1 and 65535. Changing this creates a new
- security group rule.
-
-* `remote_ip_prefix` - (Optional) The remote CIDR, the value needs to be a valid
- CIDR (i.e. 192.168.0.0/16). Changing this creates a new security group rule.
-
-* `remote_group_id` - (Optional) The remote group id, the value needs to be an
- Openstack ID of a security group in the same tenant. Changing this creates
- a new security group rule.
-
-* `security_group_id` - (Required) The security group id the rule should belong
- to, the value needs to be an Openstack ID of a security group in the same
- tenant. Changing this creates a new security group rule.
-
-* `tenant_id` - (Optional) The owner of the security group. Required if admin
- wants to create a port for another tenant. Changing this creates a new
- security group rule.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `direction` - See Argument Reference above.
-* `ethertype` - See Argument Reference above.
-* `protocol` - See Argument Reference above.
-* `port_range_min` - See Argument Reference above.
-* `port_range_max` - See Argument Reference above.
-* `remote_ip_prefix` - See Argument Reference above.
-* `remote_group_id` - See Argument Reference above.
-* `security_group_id` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-
-## Import
-
-Security Group Rules can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_secgroup_rule_v2.secgroup_rule_1 aeb68ee3-6e9d-4256-955c-9584a6212745
-```
diff --git a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown
deleted file mode 100644
index 7271c4c11..000000000
--- a/website/source/docs/providers/openstack/r/networking_secgroup_v2.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_secgroup_v2"
-sidebar_current: "docs-openstack-resource-networking-secgroup-v2"
-description: |-
- Manages a V2 Neutron security group resource within OpenStack.
----
-
-# openstack\_networking\_secgroup_v2
-
-Manages a V2 neutron security group resource within OpenStack.
-Unlike Nova security groups, neutron separates the group from the rules
-and also allows an admin to target a specific tenant_id.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_secgroup_v2" "secgroup_1" {
- name = "secgroup_1"
- description = "My neutron security group"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 networking client.
- A networking client is needed to create a port. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- security group.
-
-* `name` - (Required) A unique name for the security group.
-
-* `description` - (Optional) A description of the security group.
-
-* `tenant_id` - (Optional) The owner of the security group. Required if admin
- wants to create a port for another tenant. Changing this creates a new
- security group.
-
-* `delete_default_rules` - (Optional) Whether or not to delete the default
- egress security rules. This is `false` by default. See the below note
- for more information.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-
-## Default Security Group Rules
-
-In most cases, OpenStack will create some egress security group rules for each
-new security group. These security group rules will not be managed by
-Terraform, so if you prefer to have *all* aspects of your infrastructure
-managed by Terraform, set `delete_default_rules` to `true` and then create
-separate security group rules such as the following:
-
-```hcl
-resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_v4" {
- direction = "egress"
- ethertype = "IPv4"
- security_group_id = "${openstack_networking_secgroup_v2.secgroup.id}"
-}
-
-resource "openstack_networking_secgroup_rule_v2" "secgroup_rule_v6" {
- direction = "egress"
- ethertype = "IPv6"
- security_group_id = "${openstack_networking_secgroup_v2.secgroup.id}"
-}
-```
-
-Please note that this behavior may differ depending on the configuration of
-the OpenStack cloud. The above illustrates the current default Neutron
-behavior. Some OpenStack clouds might provide additional rules and some might
-not provide any rules at all (in which case the `delete_default_rules` setting
-is moot).
-
-## Import
-
-Security Groups can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_secgroup_v2.secgroup_1 38809219-5e8a-4852-9139-6f461c90e8bc
-```
diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown
deleted file mode 100644
index 0d094591c..000000000
--- a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown
+++ /dev/null
@@ -1,112 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_networking_subnet_v2"
-sidebar_current: "docs-openstack-resource-networking-subnet-v2"
-description: |-
- Manages a V2 Neutron subnet resource within OpenStack.
----
-
-# openstack\_networking\_subnet_v2
-
-Manages a V2 Neutron subnet resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_networking_network_v2" "network_1" {
- name = "tf_test_network"
- admin_state_up = "true"
-}
-
-resource "openstack_networking_subnet_v2" "subnet_1" {
- network_id = "${openstack_networking_network_v2.network_1.id}"
- cidr = "192.168.199.0/24"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to obtain the V2 Networking client.
- A Networking client is needed to create a Neutron subnet. If omitted, the
- `OS_REGION_NAME` environment variable is used. Changing this creates a new
- subnet.
-
-* `network_id` - (Required) The UUID of the parent network. Changing this
- creates a new subnet.
-
-* `cidr` - (Required) CIDR representing IP range for this subnet, based on IP
- version. Changing this creates a new subnet.
-
-* `ip_version` - (Optional) IP version, either 4 (default) or 6. Changing this creates a
- new subnet.
-
-* `name` - (Optional) The name of the subnet. Changing this updates the name of
- the existing subnet.
-
-* `tenant_id` - (Optional) The owner of the subnet. Required if admin wants to
- create a subnet for another tenant. Changing this creates a new subnet.
-
-* `allocation_pools` - (Optional) An array of sub-ranges of CIDR available for
- dynamic allocation to ports. The allocation_pool object structure is
- documented below. Changing this creates a new subnet.
-
-* `gateway_ip` - (Optional) Default gateway used by devices in this subnet.
- Leaving this blank and not setting `no_gateway` will cause a default
- gateway of `.1` to be used. Changing this updates the gateway IP of the
- existing subnet.
-
-* `no_gateway` - (Optional) Do not set a gateway IP on this subnet. Changing
- this removes or adds a default gateway IP of the existing subnet.
-
-* `enable_dhcp` - (Optional) Whether DHCP is enabled for this subnet.
- Acceptable values are "true" and "false". Changing this value enables or
- disables the DHCP capabilities of the existing subnet. Defaults to true.
-
-* `dns_nameservers` - (Optional) An array of DNS name server names used by hosts
- in this subnet. Changing this updates the DNS name servers for the existing
- subnet.
-
-* `host_routes` - (Optional) An array of routes that should be used by devices
- with IPs from this subnet (not including local subnet route). The host_route
- object structure is documented below. Changing this updates the host routes
- for the existing subnet.
-
-* `value_specs` - (Optional) Map of additional options.
-
-The `allocation_pools` block supports:
-
-* `start` - (Required) The starting address.
-
-* `end` - (Required) The ending address.
-
-The `host_routes` block supports:
-
-* `destination_cidr` - (Required) The destination CIDR.
-
-* `next_hop` - (Required) The next hop in the route.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `network_id` - See Argument Reference above.
-* `cidr` - See Argument Reference above.
-* `ip_version` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `tenant_id` - See Argument Reference above.
-* `allocation_pools` - See Argument Reference above.
-* `gateway_ip` - See Argument Reference above.
-* `enable_dhcp` - See Argument Reference above.
-* `dns_nameservers` - See Argument Reference above.
-* `host_routes` - See Argument Reference above.
-
-## Import
-
-Subnets can be imported using the `id`, e.g.
-
-```
-$ terraform import openstack_networking_subnet_v2.subnet_1 da4faf16-5546-41e4-8330-4d0002b74048
-```
diff --git a/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown b/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown
deleted file mode 100644
index 21ca25c6a..000000000
--- a/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "openstack"
-page_title: "OpenStack: openstack_objectstorage_container_v1"
-sidebar_current: "docs-openstack-resource-objectstorage-container-v1"
-description: |-
- Manages a V1 container resource within OpenStack.
----
-
-# openstack\_objectstorage\_container_v1
-
-Manages a V1 container resource within OpenStack.
-
-## Example Usage
-
-```hcl
-resource "openstack_objectstorage_container_v1" "container_1" {
- region = "RegionOne"
- name = "tf-test-container-1"
-
- metadata {
- test = "true"
- }
-
- content_type = "application/json"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `region` - (Required) The region in which to create the container. If
- omitted, the `OS_REGION_NAME` environment variable is used. Changing this
- creates a new container.
-
-* `name` - (Required) A unique name for the container. Changing this creates a
- new container.
-
-* `container_read` - (Optional) Sets an access control list (ACL) that grants
- read access. This header can contain a comma-delimited list of users that
- can read the container (allows the GET method for all objects in the
- container). Changing this updates the access control list read access.
-
-* `container_sync_to` - (Optional) The destination for container synchronization.
- Changing this updates container synchronization.
-
-* `container_sync_key` - (Optional) The secret key for container synchronization.
- Changing this updates container synchronization.
-
-* `container_write` - (Optional) Sets an ACL that grants write access.
- Changing this updates the access control list write access.
-
-* `metadata` - (Optional) Custom key/value pairs to associate with the container.
- Changing this updates the existing container metadata.
-
-* `content_type` - (Optional) The MIME type for the container. Changing this
- updates the MIME type.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `region` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `container_read` - See Argument Reference above.
-* `container_sync_to` - See Argument Reference above.
-* `container_sync_key` - See Argument Reference above.
-* `container_write` - See Argument Reference above.
-* `metadata` - See Argument Reference above.
-* `content_type` - See Argument Reference above.
diff --git a/website/source/docs/providers/opsgenie/d/user.html.markdown b/website/source/docs/providers/opsgenie/d/user.html.markdown
deleted file mode 100644
index 253a10df3..000000000
--- a/website/source/docs/providers/opsgenie/d/user.html.markdown
+++ /dev/null
@@ -1,38 +0,0 @@
----
-layout: "opsgenie"
-page_title: "OpsGenie: opsgenie_user"
-sidebar_current: "docs-opsgenie-datasource-user"
-description: |-
- Gets information about a specific user within OpsGenie
----
-
-# opsgenie\_user
-
-Use this data source to get information about a specific user within OpsGenie.
-
-## Example Usage
-
-```hcl
-data "opsgenie_user" "cookie_monster" {
- username = "me@cookie-monster.com"
-}
-
-resource "opsgenie_team" "test" {
- name = "cookieeaters"
-
- member {
- username = "${data.opsgenie_user.cookie_monster.username}"
- role = "${data.opsgenie_user.cookie_monster.role}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `username` - (Required) The username (email) to use to find a user in OpsGenie.
-
-## Attributes Reference
-* `full_name` - The full name of the found user.
-* `role` - The role of the found user.
diff --git a/website/source/docs/providers/opsgenie/index.html.markdown b/website/source/docs/providers/opsgenie/index.html.markdown
deleted file mode 100644
index 526764fc0..000000000
--- a/website/source/docs/providers/opsgenie/index.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "opsgenie"
-page_title: "Provider: OpsGenie"
-sidebar_current: "docs-opsgenie-index"
-description: |-
- The OpsGenie provider is used to interact with the many resources supported by OpsGenie. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# OpsGenie Provider
-
-The OpsGenie provider is used to interact with the
-many resources supported by OpsGenie. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the OpsGenie Provider
-provider "opsgenie" {
- api_key = "key"
-}
-
-# Create a user
-resource "opsgenie_user" "test" {
- # ...
-}
-```
-
-## Configuration Reference
-
-The following arguments are supported:
-
-* `api_key` - (Required) The API Key for the OpsGenie Integration. If omitted, the
- `OPSGENIE_API_KEY` environment variable is used.
-
-You can generate an API Key within OpsGenie by creating a new API Integration with Read/Write permissions.
-
-## Testing and Development
-
-In order to run the Acceptance Tests for development, the following environment
-variables must also be set:
-
-* `OPSGENIE_API_KEY` - The API Key used for the OpsGenie Integration.
diff --git a/website/source/docs/providers/opsgenie/r/team.html.markdown b/website/source/docs/providers/opsgenie/r/team.html.markdown
deleted file mode 100644
index 3991ee680..000000000
--- a/website/source/docs/providers/opsgenie/r/team.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "opsgenie"
-page_title: "OpsGenie: opsgenie_team"
-sidebar_current: "docs-opsgenie-resource-team"
-description: |-
- Manages a Team within OpsGenie.
----
-
-# opsgenie\_team
-
-Manages a Team within OpsGenie.
-
-## Example Usage
-
-```hcl
-resource "opsgenie_user" "first" {
- username = "user@domain.com"
- full_name = "Cookie Monster"
- role = "User"
-}
-
-resource "opsgenie_user" "second" {
- username = "eggman@dr-robotnik.com"
- full_name = "Dr Ivo Eggman Robotnik"
- role = "User"
-}
-
-resource "opsgenie_team" "test" {
- name = "example"
- description = "This team deals with all the things"
-
- member {
- username = "${opsgenie_user.first.username}"
- role = "admin"
- }
-
- member {
- username = "${opsgenie_user.second.username}"
- role = "user"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name associated with this team. OpsGenie defines that this must not be longer than 100 characters.
-
-* `description` - (Optional) A description for this team.
-
-* `member` - (Optional) A Member block as documented below.
-
-`member` supports the following:
-
-* `username` - (Required) The username for the member to add to this Team.
-* `role` - (Required) The role for the user within the Team - can be either 'Admin' or 'User'.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the OpsGenie User.
-
-## Import
-
-Users can be imported using the `id`, e.g.
-
-```
-$ terraform import opsgenie_team.team1 812be1a1-32c8-4666-a7fb-03ecc385106c
-```
diff --git a/website/source/docs/providers/opsgenie/r/user.html.markdown b/website/source/docs/providers/opsgenie/r/user.html.markdown
deleted file mode 100644
index 88cb904bb..000000000
--- a/website/source/docs/providers/opsgenie/r/user.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "opsgenie"
-page_title: "OpsGenie: opsgenie_user"
-sidebar_current: "docs-opsgenie-resource-user"
-description: |-
- Manages a User within OpsGenie.
----
-
-# opsgenie\_user
-
-Manages a User within OpsGenie.
-
-## Example Usage
-
-```hcl
-resource "opsgenie_user" "test" {
- username = "user@domain.com"
- full_name = "Cookie Monster"
- role = "User"
- locale = "en_US"
- timezone = "America/New_York"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `username` - (Required) The email address associated with this user. OpsGenie defines that this must not be longer than 100 characters.
-
-* `full_name` - (Required) The Full Name of the User.
-
-* `role` - (Required) The Role assigned to the User. Either a built-in such as 'Owner', 'Admin' or 'User' - or the name of a custom role.
-
-* `locale` - (Optional) Location information for the user. Please look at [Supported Locale Ids](https://www.opsgenie.com/docs/miscellaneous/supported-locales) for available locales - Defaults to "en_US".
-
-* `timezone` - (Optional) Timezone information of the user. Please look at [Supported Timezone Ids](https://www.opsgenie.com/docs/miscellaneous/supported-timezone-ids) for available timezones - Defaults to "America/New_York".
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the OpsGenie User.
-
-## Import
-
-Users can be imported using the `id`, e.g.
-
-```
-$ terraform import opsgenie_user.user da4faf16-5546-41e4-8330-4d0002b74048
-```
diff --git a/website/source/docs/providers/ovh/d/publiccloud_region.html.markdown b/website/source/docs/providers/ovh/d/publiccloud_region.html.markdown
deleted file mode 100644
index b9d2b7b7d..000000000
--- a/website/source/docs/providers/ovh/d/publiccloud_region.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: publiccloud_region"
-sidebar_current: "docs-ovh-datasource-publiccloud-region"
-description: |-
- Get information & status of a region associated with a public cloud project.
----
-
-# publiccloud\_region
-
-Use this data source to retrieve information about a region associated with a
-public cloud project. The region must be associated with the project.
-
-## Example Usage
-
-```hcl
-data "ovh_publiccloud_region" "GRA1" {
- project_id = "XXXXXX"
- region = "GRA1"
-}
-```
-
-## Argument Reference
-
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-
-* `region` - (Required) The name of the region associated with the public cloud
-project.
-
-## Attributes Reference
-
-`id` is set to the ID of the project concatenated with the name of the region.
-In addition, the following attributes are exported:
-
-* `continentCode` - the code of the geographic continent the region is located in.
-E.g.: EU for Europe, US for America...
-* `datacenterLocation` - The location code of the datacenter.
-E.g.: "GRA", meaning Gravelines, for region "GRA1"
-* `services` - The list of public cloud services running within the region
- * `name` - the name of the public cloud service
- * `status` - the status of the service
diff --git a/website/source/docs/providers/ovh/d/publiccloud_regions.html.markdown b/website/source/docs/providers/ovh/d/publiccloud_regions.html.markdown
deleted file mode 100644
index ca5f722f2..000000000
--- a/website/source/docs/providers/ovh/d/publiccloud_regions.html.markdown
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: publiccloud_regions"
-sidebar_current: "docs-ovh-datasource-publiccloud-regions"
-description: |-
- Get the list of regions associated with a public cloud project.
----
-
-# publiccloud\_regions
-
-Use this data source to get the regions of a public cloud project.
-
-## Example Usage
-
-```hcl
-data "ovh_publiccloud_regions" "regions" {
- project_id = "XXXXXX"
-}
-```
-
-## Argument Reference
-
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-
-## Attributes Reference
-
-`id` is set to the ID of the project. In addition, the following attributes
-are exported:
-
-* `regions` - The list of regions associated with the project
diff --git a/website/source/docs/providers/ovh/index.html.markdown b/website/source/docs/providers/ovh/index.html.markdown
deleted file mode 100644
index ad1a949bd..000000000
--- a/website/source/docs/providers/ovh/index.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "ovh"
-page_title: "Provider: OVH"
-sidebar_current: "docs-ovh-index"
-description: |-
- The OVH provider is used to interact with the many resources supported by OVH. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# OVH Provider
-
-The OVH provider is used to interact with the
-many resources supported by OVH. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```
-# Configure the OVH Provider
-provider "ovh" {
- endpoint = "ovh-eu"
- application_key = "yyyyyy"
- application_secret = "xxxxxxxxxxxxxx"
- consumer_key = "zzzzzzzzzzzzzz"
-}
-
-# Create a public cloud user
-resource "ovh_publiccloud_user" "user-test" {
- # ...
-}
-```
-
-## Configuration Reference
-
-The following arguments are supported:
-
-* `endpoint` - (Required) Specify which API endpoint to use.
- It can be set using the OVH_ENDPOINT environment
- variable. Value can be set to either "ovh-eu" or "ovh-ca".
-
-* `application_key` - (Required) The API Application Key. If omitted,
- the `OVH_APPLICATION_KEY` environment variable is used.
-
-* `application_secret` - (Required) The API Application Secret. If omitted,
- the `OVH_APPLICATION_SECRET` environment variable is used.
-
-* `consumer_key` - (Required) The API Consumer key. If omitted,
- the `OVH_CONSUMER_KEY` environment variable is used.
-
-
-## Testing and Development
-
-In order to run the Acceptance Tests for development, the following environment
-variables must also be set:
-
-* `OVH_VRACK` - The id of the vrack to use.
-
-* `OVH_PUBLIC_CLOUD` - The id of the public cloud project.
-
-You should be able to use any OVH environment to develop on as long as the
-above environment variables are set.
diff --git a/website/source/docs/providers/ovh/r/publiccloud_private_network.html.markdown b/website/source/docs/providers/ovh/r/publiccloud_private_network.html.markdown
deleted file mode 100644
index 1edf2f0f5..000000000
--- a/website/source/docs/providers/ovh/r/publiccloud_private_network.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: publiccloud_private_network"
-sidebar_current: "docs-ovh-resource-publiccloud-private-network"
-description: |-
- Creates a private network in a public cloud project.
----
-
-# ovh_publiccloud\_private_network
-
-Creates a private network in a public cloud project.
-
-## Example Usage
-
-```
-resource "ovh_publiccloud_private_network" "net" {
- project_id = "67890"
- name = "admin_network"
- regions = ["GRA1", "BHS1"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-
-* `name` - (Required) The name of the network.
-
-* `vlan_id` - a vlan id to associate with the network.
- Changing this value recreates the resource. Defaults to 0.
-
-* `regions` - an array of valid OVH public cloud region ID in which the network
- will be available. Ex.: "GRA1". Defaults to all public cloud regions.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `project_id` - See Argument Reference above.
-* `name` - See Argument Reference above.
-* `vlan_id` - See Argument Reference above.
-* `regions` - See Argument Reference above.
-* `regions_status` - A map representing the status of the network per region.
-* `regions_status/region` - The id of the region.
-* `regions_status/status` - The status of the network in the region.
-* `status` - the status of the network. should be normally set to 'ACTIVE'.
-* `type` - the type of the network. Either 'private' or 'public'.
diff --git a/website/source/docs/providers/ovh/r/publiccloud_private_network_subnet.html.markdown b/website/source/docs/providers/ovh/r/publiccloud_private_network_subnet.html.markdown
deleted file mode 100644
index 194fec489..000000000
--- a/website/source/docs/providers/ovh/r/publiccloud_private_network_subnet.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: publiccloud_private_network_subnet"
-sidebar_current: "docs-ovh-resource-publiccloud-private-network-subnet"
-description: |-
- Creates a subnet in a private network of a public cloud project.
----
-
-# ovh_publiccloud\_private_network\_subnet
-
-Creates a subnet in a private network of a public cloud project.
-
-## Example Usage
-
-```
-resource "ovh_publiccloud_private_network_subnet" "subnet" {
- project_id = "67890"
- network_id = "0234543"
- region = "GRA1"
- start = "192.168.168.100"
- end = "192.168.168.200"
- network = "192.168.168.0/24"
- dhcp = true
- no_gateway = false
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-Changing this forces a new resource to be created.
-
-* `network_id` - (Required) The id of the network.
- Changing this forces a new resource to be created.
-
-* `dhcp` - (Optional) Enable DHCP.
- Changing this forces a new resource to be created. Defaults to false.
-
-* `start` - (Required) First ip for this region.
- Changing this value recreates the subnet.
-
-* `end` - (Required) Last ip for this region.
- Changing this value recreates the subnet.
-
-* `network` - (Required) Global network in CIDR format.
- Changing this value recreates the subnet
-
-* `region` - The region in which the network subnet will be created.
- Ex.: "GRA1". Changing this value recreates the resource.
-
-* `no_gateway` - Set to true if you don't want to set a default gateway IP.
- Changing this value recreates the resource. Defaults to false.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `project_id` - See Argument Reference above.
-* `network_id` - See Argument Reference above.
-* `dhcp_id` - See Argument Reference above.
-* `start` - See Argument Reference above.
-* `end` - See Argument Reference above.
-* `network` - See Argument Reference above.
-* `region` - See Argument Reference above.
-* `no_gateway` - See Argument Reference above.
-* `cidr` - Ip Block representing the subnet cidr.
-* `ip_pools` - List of ip pools allocated in the subnet.
-* `ip_pools/network` - Global network with cidr.
-* `ip_pools/region` - Region where this subnet is created.
-* `ip_pools/dhcp` - DHCP enabled.
-* `ip_pools/end` - Last ip for this region.
-* `ip_pools/start` - First ip for this region.
-
diff --git a/website/source/docs/providers/ovh/r/publiccloud_user.html.markdown b/website/source/docs/providers/ovh/r/publiccloud_user.html.markdown
deleted file mode 100644
index 3ad7728b9..000000000
--- a/website/source/docs/providers/ovh/r/publiccloud_user.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: publiccloud_user"
-sidebar_current: "docs-ovh-resource-publiccloud-user"
-description: |-
- Creates a user in a public cloud project.
----
-
-# ovh_publiccloud\_user
-
-Creates a user in a public cloud project.
-
-## Example Usage
-
-```
-resource "ovh_publiccloud_user" "user1" {
- project_id = "67890"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-
-* `description` - A description associated with the user.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `project_id` - See Argument Reference above.
-* `description` - See Argument Reference above.
-* `username` - the username generated for the user. This username can be used with
- the Openstack API.
-* `password` - (Sensitive) the password generated for the user. The password can
- be used with the Openstack API. This attribute is sensitive and will only be
- retrieved once during creation.
-* `status` - the status of the user. should be normally set to 'ok'.
-* `creation_date` - the date the user was created.
-* `openstack_rc` - a convenient map representing an openstack_rc file.
- Note: no password nor sensitive token is set in this map.
diff --git a/website/source/docs/providers/ovh/r/vrack_publiccloud_attachment.html.markdown b/website/source/docs/providers/ovh/r/vrack_publiccloud_attachment.html.markdown
deleted file mode 100644
index 0d6830263..000000000
--- a/website/source/docs/providers/ovh/r/vrack_publiccloud_attachment.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "ovh"
-page_title: "OVH: vrack_publiccloud_attachment"
-sidebar_current: "docs-ovh-resource-vrack-publiccloud-attachment"
-description: |-
- Attach an existing PublicCloud project to an existing VRack.
----
-
-# ovh_vrack\_publiccloud\_attachment
-
-Attach an existing PublicCloud project to an existing VRack.
-
-## Example Usage
-
-```
-resource "ovh_vrack_publiccloud_attachment" "attach" {
- vrack_id = "12345"
- project_id = "67890"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vrack_id` - (Required) The id of the vrack. If omitted, the `OVH_VRACK_ID`
- environment variable is used.
-
-* `project_id` - (Required) The id of the public cloud project. If omitted,
- the `OVH_PROJECT_ID` environment variable is used.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `vrack_id` - See Argument Reference above.
-* `project_id` - See Argument Reference above.
-
-## Notes
-
-The vrack attachment isn't a proper resource with an ID. As such, the resource id will
-be forged from the vrack and project ids and there's no correct way to import the
-resource in terraform. When the resource is created by terraform, it first checks if the
-attachment already exists within OVH infrastructure; if it exists it sets the resource id
-without modifying anything. Otherwise, it will try to attach the vrack with the public
-cloud project.
diff --git a/website/source/docs/providers/packet/index.html.markdown b/website/source/docs/providers/packet/index.html.markdown
deleted file mode 100644
index c29833542..000000000
--- a/website/source/docs/providers/packet/index.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "packet"
-page_title: "Provider: Packet"
-sidebar_current: "docs-packet-index"
-description: |-
- The Packet provider is used to interact with the resources supported by Packet. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Packet Provider
-
-The Packet provider is used to interact with the resources supported by Packet.
-The provider needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Packet Provider
-provider "packet" {
- auth_token = "${var.auth_token}"
-}
-
-# Create a project
-resource "packet_project" "cool_project" {
- name = "My First Terraform Project"
- payment_method = "PAYMENT_METHOD_ID" # Only required for a non-default payment method
-}
-
-# Create a device and add it to tf_project_1
-resource "packet_device" "web1" {
- hostname = "tf.coreos2"
- plan = "baremetal_1"
- facility = "ewr1"
- operating_system = "coreos_stable"
- billing_cycle = "hourly"
- project_id = "${packet_project.cool_project.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `auth_token` - (Required) This is your Packet API Auth token. This can also be specified
- with the `PACKET_AUTH_TOKEN` shell environment variable.
diff --git a/website/source/docs/providers/packet/r/device.html.markdown b/website/source/docs/providers/packet/r/device.html.markdown
deleted file mode 100644
index 5b8cb1615..000000000
--- a/website/source/docs/providers/packet/r/device.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "packet"
-page_title: "Packet: packet_device"
-sidebar_current: "docs-packet-resource-device"
-description: |-
- Provides a Packet device resource. This can be used to create, modify, and delete devices.
----
-
-# packet\_device
-
-Provides a Packet device resource. This can be used to create,
-modify, and delete devices.
-
-## Example Usage
-
-```hcl
-# Create a device and add it to cool_project
-resource "packet_device" "web1" {
- hostname = "tf.coreos2"
- plan = "baremetal_1"
- facility = "ewr1"
- operating_system = "coreos_stable"
- billing_cycle = "hourly"
- project_id = "${packet_project.cool_project.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `hostname` - (Required) The device name
-* `project_id` - (Required) The id of the project in which to create the device
-* `operating_system` - (Required) The operating system slug
-* `facility` - (Required) The facility in which to create the device
-* `plan` - (Required) The hardware config slug
-* `billing_cycle` - (Required) monthly or hourly
-* `user_data` (Optional) - A string of the desired User Data for the device.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the device
-* `hostname`- The hostname of the device
-* `project_id`- The ID of the project the device belongs to
-* `facility` - The facility the device is in
-* `plan` - The hardware config of the device
-* `network` - The private and public v4 and v6 IPs assigned to the device
-* `locked` - Whether the device is locked
-* `billing_cycle` - The billing cycle of the device (monthly or hourly)
-* `operating_system` - The operating system running on the device
-* `status` - The status of the device
-* `created` - The timestamp for when the device was created
-* `updated` - The timestamp for the last time the device was updated
diff --git a/website/source/docs/providers/packet/r/project.html.markdown b/website/source/docs/providers/packet/r/project.html.markdown
deleted file mode 100644
index f7ee09db3..000000000
--- a/website/source/docs/providers/packet/r/project.html.markdown
+++ /dev/null
@@ -1,40 +0,0 @@
----
-layout: "packet"
-page_title: "Packet: packet_project"
-sidebar_current: "docs-packet-resource-project"
-description: |-
- Provides a Packet Project resource.
----
-
-# packet\_project
-
-Provides a Packet Project resource to allow you to manage devices
-in your projects.
-
-## Example Usage
-
-```hcl
-# Create a new Project
-resource "packet_project" "tf_project_1" {
- name = "Terraform Fun"
- payment_method = "payment-method-id"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the Project on Packet.net
-* `payment_method` - (Optional) The unique ID of the payment method on file to use for services created
-in this project. If not given, the project will use the default payment method for your user.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID of the project
-* `payment_method` - The unique ID of the payment method on file to use for services created
-in this project.
-* `created` - The timestamp for when the Project was created
-* `updated` - The timestamp for the last time the Project was updated
diff --git a/website/source/docs/providers/packet/r/ssh_key.html.markdown b/website/source/docs/providers/packet/r/ssh_key.html.markdown
deleted file mode 100644
index 7eaaa042a..000000000
--- a/website/source/docs/providers/packet/r/ssh_key.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "packet"
-page_title: "Packet: packet_ssh_key"
-sidebar_current: "docs-packet-resource-ssh-key"
-description: |-
- Provides a Packet SSH key resource.
----
-
-# packet\_ssh_key
-
-Provides a Packet SSH key resource to allow you to manage SSH
-keys on your account. All SSH keys on your account are loaded on
-all new devices, they do not have to be explicitly declared on
-device creation.
-
-## Example Usage
-
-```hcl
-# Create a new SSH key
-resource "packet_ssh_key" "key1" {
- name = "terraform-1"
- public_key = "${file("/home/terraform/.ssh/id_rsa.pub")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the SSH key for identification
-* `public_key` - (Required) The public key. If this is a file, it
-can be read using the file interpolation function
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID of the key
-* `name` - The name of the SSH key
-* `public_key` - The text of the public key
-* `fingerprint` - The fingerprint of the SSH key
-* `created` - The timestamp for when the SSH key was created
-* `updated` - The timestamp for the last time the SSH key was updated
diff --git a/website/source/docs/providers/packet/r/volume.html.markdown b/website/source/docs/providers/packet/r/volume.html.markdown
deleted file mode 100644
index 3ea4a292f..000000000
--- a/website/source/docs/providers/packet/r/volume.html.markdown
+++ /dev/null
@@ -1,70 +0,0 @@
----
-layout: "packet"
-page_title: "Packet: packet_volume"
-sidebar_current: "docs-packet-resource-volume"
-description: |-
- Provides a Packet Block Storage Volume Resource.
----
-
-# packet\_volume
-
-Provides a Packet Block Storage Volume resource to allow you to
-manage block volumes on your account.
-Once created by Terraform, they must then be attached and mounted
-using the API and `packet_block_attach` and `packet_block_detach`
-scripts.
-
-## Example Usage
-
-```hcl
-# Create a new block volume
-resource "packet_volume" "volume1" {
- description = "terraform-volume-1"
- facility = "ewr1"
- project_id = "${packet_project.cool_project.id}"
- plan = "storage_1"
- size = 100
- billing_cycle = "hourly"
-
- snapshot_policies = {
- snapshot_frequency = "1day"
-
- snapshot_count = 7
- }
-
- snapshot_policies = {
- snapshot_frequency = "1month"
-
- snapshot_count = 6
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `plan` - (Required) The service plan slug of the volume
-* `facility` - (Required) The facility to create the volume in
-* `project_id` - (Required) The packet project ID to deploy the volume in
-* `size` - (Required) The size in GB to make the volume
-* `billing_cycle` - The billing cycle, defaults to "hourly"
-* `description` - Optional description for the volume
-* `snapshot_policies` - Optional list of snapshot policies
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The unique ID of the volume
-* `name` - The name of the volume
-* `description` - The description of the volume
-* `size` - The size in GB of the volume
-* `plan` - Performance plan the volume is on
-* `billing_cycle` - The billing cycle, defaults to hourly
-* `facility` - The facility slug the volume resides in
-* `state` - The state of the volume
-* `locked` - Whether the volume is locked or not
-* `project_id` - The project ID the volume is in
-* `created` - The timestamp for when the volume was created
-* `updated` - The timestamp for the last time the volume was updated
diff --git a/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown b/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown
deleted file mode 100644
index 916ed72c8..000000000
--- a/website/source/docs/providers/pagerduty/d/escalation_policy.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_escalation_policy"
-sidebar_current: "docs-pagerduty-datasource-escalation-policy"
-description: |-
- Provides information about an Escalation Policy.
-
- This data source can be helpful when an escalation policy is handled outside Terraform but you still want to reference it in other resources.
----
-
-# pagerduty\_escalation_policy
-
-Use this data source to get information about a specific [escalation policy][1] that you can use for other PagerDuty resources.
-
-## Example Usage
-
-```hcl
-data "pagerduty_escalation_policy" "test" {
- name = "Engineering Escalation Policy"
-}
-
-resource "pagerduty_service" "test" {
- name = "My Web App"
- auto_resolve_timeout = 14400
- acknowledgement_timeout = 600
- escalation_policy = "${data.pagerduty_escalation_policy.test.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name to use to find an escalation policy in the PagerDuty API.
-
-## Attributes Reference
-* `name` - The short name of the found escalation policy.
-
-[1]: https://v2.developer.pagerduty.com/v2/page/api-reference#!/Escalation_Policies/get_escalation_policies
diff --git a/website/source/docs/providers/pagerduty/d/schedule.html.markdown b/website/source/docs/providers/pagerduty/d/schedule.html.markdown
deleted file mode 100644
index 842e0f7d4..000000000
--- a/website/source/docs/providers/pagerduty/d/schedule.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_schedule"
-sidebar_current: "docs-pagerduty-datasource-schedule"
-description: |-
- Provides information about a Schedule.
-
- This data source can be helpful when a schedule is handled outside Terraform but you still want to reference it in other resources.
----
-
-# pagerduty\_schedule
-
-Use this data source to get information about a specific [schedule][1] that you can use for other PagerDuty resources.
-
-## Example Usage
-
-```hcl
-data "pagerduty_schedule" "test" {
- name = "Daily Engineering Rotation"
-}
-
-resource "pagerduty_escalation_policy" "foo" {
- name = "Engineering Escalation Policy"
- num_loops = 2
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "schedule"
- id = "${data.pagerduty_schedule.test.id}"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name to use to find a schedule in the PagerDuty API.
-
-## Attributes Reference
-* `name` - The short name of the found schedule.
-
-[1]: https://v2.developer.pagerduty.com/v2/page/api-reference#!/Schedules/get_schedules
diff --git a/website/source/docs/providers/pagerduty/d/user.html.markdown b/website/source/docs/providers/pagerduty/d/user.html.markdown
deleted file mode 100644
index 3af946c02..000000000
--- a/website/source/docs/providers/pagerduty/d/user.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_user"
-sidebar_current: "docs-pagerduty-datasource-user"
-description: |-
- Get information about a user that you can use for a service integration (e.g Amazon Cloudwatch, Splunk, Datadog).
----
-
-# pagerduty\_user
-
-Use this data source to get information about a specific [user][1] that you can use for other PagerDuty resources.
-
-## Example Usage
-
-```hcl
-data "pagerduty_user" "me" {
- email = "me@example.com"
-}
-
-resource "pagerduty_escalation_policy" "foo" {
- name = "Engineering Escalation Policy"
- num_loops = 2
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "user"
- id = "${data.pagerduty_user.me.id}"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `email` - (Required) The email to use to find a user in the PagerDuty API.
-
-## Attributes Reference
-* `name` - The short name of the found user.
-
-[1]: https://v2.developer.pagerduty.com/v2/page/api-reference#!/Users/get_users
diff --git a/website/source/docs/providers/pagerduty/d/vendor.html.markdown b/website/source/docs/providers/pagerduty/d/vendor.html.markdown
deleted file mode 100644
index dc2300cbb..000000000
--- a/website/source/docs/providers/pagerduty/d/vendor.html.markdown
+++ /dev/null
@@ -1,65 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_vendor"
-sidebar_current: "docs-pagerduty-datasource-vendor"
-description: |-
- Get information about a vendor that you can use for a service integration (e.g Amazon Cloudwatch, Splunk, Datadog).
----
-
-# pagerduty\_vendor
-
-Use this data source to get information about a specific [vendor][1] that you can use for a service integration (e.g Amazon Cloudwatch, Splunk, Datadog).
-
-## Example Usage
-
-```hcl
-data "pagerduty_vendor" "datadog" {
- name = "Datadog"
-}
-
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-
-resource "pagerduty_escalation_policy" "foo" {
- name = "Engineering Escalation Policy"
- num_loops = 2
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "user"
- id = "${pagerduty_user.example.id}"
- }
- }
-}
-
-resource "pagerduty_service" "example" {
- name = "My Web App"
- auto_resolve_timeout = 14400
- acknowledgement_timeout = 600
- escalation_policy = "${pagerduty_escalation_policy.example.id}"
-}
-
-resource "pagerduty_service_integration" "example" {
- name = "Datadog Integration"
- vendor = "${data.pagerduty_vendor.datadog.id}"
- service = "${pagerduty_service.example.id}"
- type = "generic_events_api_inbound_integration"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The vendor name to use to find a vendor in the PagerDuty API.
-
-## Attributes Reference
-* `name` - The short name of the found vendor.
-* `type` - The generic service type for this vendor.
-
-[1]: https://v2.developer.pagerduty.com/v2/page/api-reference#!/Vendors/get_vendors
diff --git a/website/source/docs/providers/pagerduty/index.html.markdown b/website/source/docs/providers/pagerduty/index.html.markdown
deleted file mode 100644
index ff3a1bd2f..000000000
--- a/website/source/docs/providers/pagerduty/index.html.markdown
+++ /dev/null
@@ -1,42 +0,0 @@
----
-layout: "pagerduty"
-page_title: "Provider: PagerDuty"
-sidebar_current: "docs-pagerduty-index"
-description: |-
- PagerDuty is an alarm aggregation and dispatching service
----
-
-# PagerDuty Provider
-
-[PagerDuty](https://www.pagerduty.com/) is an alarm aggregation and dispatching service for system administrators and support teams. It collects alerts from your monitoring tools, gives you an overall view of all of your monitoring alarms, and alerts an on duty engineer if there’s a problem.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the PagerDuty provider
-provider "pagerduty" {
- token = "${var.pagerduty_token}"
-}
-
-# Create a PagerDuty team
-resource "pagerduty_team" "engineering" {
- name = "Engineering"
- description = "All engineering"
-}
-
-# Create a PagerDuty user
-resource "pagerduty_user" "earline" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.engineering.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `token` - (Required) The v2 authorization token. See [API Documentation](https://v2.developer.pagerduty.com/docs/authentication) for more information.
-* `skip_credentials_validation` - (Optional) Skip validation of the token against the PagerDuty API.
diff --git a/website/source/docs/providers/pagerduty/r/addon.html.markdown b/website/source/docs/providers/pagerduty/r/addon.html.markdown
deleted file mode 100644
index 21f4f0398..000000000
--- a/website/source/docs/providers/pagerduty/r/addon.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_addon"
-sidebar_current: "docs-pagerduty-resource-addon"
-description: |-
- Creates and manages an add-on in PagerDuty.
----
-
-# pagerduty\_addon
-
-With [add-ons](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Add-ons/get_addons), third-party developers can write their own add-ons to PagerDuty's UI. Given a configuration containing a src parameter, that URL will be embedded in an iframe on a page that's available to users from a drop-down menu.
-
-## Example Usage
-
-```hcl
-resource "pagerduty_addon" "example" {
- name = "Internal Status Page"
- src = "https://intranet.example.com/status"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the add-on.
- * `src` - (Required) The source URL to display in a frame in the PagerDuty UI. `HTTPS` is required.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the add-on.
-
-## Import
-
-Add-ons can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_addon.example P3DH5M6
-```
diff --git a/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown b/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown
deleted file mode 100644
index 7dcb04dff..000000000
--- a/website/source/docs/providers/pagerduty/r/escalation_policy.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_escalation_policy"
-sidebar_current: "docs-pagerduty-resource-escalation_policy"
-description: |-
- Creates and manages an escalation policy in PagerDuty.
----
-
-# pagerduty\_escalation_policy
-
-An [escalation policy](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Escalation_Policies/get_escalation_policies) determines what user or schedule will be notified first, second, and so on when an incident is triggered. Escalation policies are used by one or more services.
-
-
-## Example Usage
-
-```hcl
-resource "pagerduty_team" "example" {
- name = "Engineering"
- description = "All engineering"
-}
-
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-
-resource "pagerduty_escalation_policy" "example" {
- name = "Engineering Escalation Policy"
- num_loops = 2
- teams = ["${pagerduty_team.example.id}"]
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "user"
- id = "${pagerduty_user.example.id}"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the escalation policy.
-* `teams` - (Optional) Teams associated with the policy. Account must have the `teams` ability to use this parameter.
-* `description` - (Optional) A human-friendly description of the escalation policy.
- If not set, a placeholder of "Managed by Terraform" will be set.
-* `num_loops` - (Optional) The number of times the escalation policy will repeat after reaching the end of its escalation.
-* `rule` - (Required) An Escalation rule block. Escalation rules documented below.
-
-
-Escalation rules (`rule`) supports the following:
-
- * `escalation_delay_in_minutes` - (Required) The number of minutes before an unacknowledged incident escalates away from this rule.
- * `target` - (Required) A target block. Target blocks documented below.
-
-
-Targets (`target`) supports the following:
-
- * `type` - (Optional) Can be `user`, `schedule`, `user_reference` or `schedule_reference`. Defaults to `user_reference`
- * `id` - (Required) A target ID
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the escalation policy.
-
-## Import
-
-Escalation policies can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_escalation_policy.main PLBP09X
-```
diff --git a/website/source/docs/providers/pagerduty/r/schedule.html.markdown b/website/source/docs/providers/pagerduty/r/schedule.html.markdown
deleted file mode 100644
index 9bb1f95be..000000000
--- a/website/source/docs/providers/pagerduty/r/schedule.html.markdown
+++ /dev/null
@@ -1,82 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_schedule"
-sidebar_current: "docs-pagerduty-resource-schedule"
-description: |-
- Creates and manages a schedule in PagerDuty.
----
-
-# pagerduty\_schedule
-
-A [schedule](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Schedules/get_schedules) determines the time periods that users are on call. Only on-call users are eligible to receive notifications from incidents.
-
-
-## Example Usage
-
-```hcl
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-
-resource "pagerduty_schedule" "foo" {
- name = "Daily Engineering Rotation"
- time_zone = "America/New_York"
-
- layer {
- name = "Night Shift"
- start = "2015-11-06T20:00:00-05:00"
- rotation_virtual_start = "2015-11-06T20:00:00-05:00"
- rotation_turn_length_seconds = 86400
- users = ["${pagerduty_user.foo.id}"]
-
- restriction {
- type = "daily_restriction"
- start_time_of_day = "08:00:00"
- duration_seconds = 32400
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The name of the schedule.
-* `time_zone` - (Required) The time zone of the schedule (e.g Europe/Berlin).
-* `description` - (Optional) The description of the schedule
-* `layer` - (Required) A schedule layer block. Schedule layers documented below.
-
-
-Schedule layers (`layer`) supports the following:
-
-* `name` - (Optional) The name of the schedule layer.
-* `start` - (Required) The start time of the schedule layer.
-* `end` - (Optional) The end time of the schedule layer. If not specified, the layer does not end.
-* `rotation_virtual_start` - (Required) The effective start time of the schedule layer. This can be before the start time of the schedule.
-* `rotation_turn_length_seconds` - (Required) The duration of each on-call shift in `seconds`.
-* `users` - (Required) The ordered list of users on this layer. The position of the user on the list determines their order in the layer.
-* `restriction` - (Optional) A schedule layer restriction block. Restriction blocks documented below.
-
-
-Restriction blocks (`restriction`) supports the following:
-
-* `type` - (Required) Can be `daily_restriction` or `weekly_restriction`
-* `start_time_of_day` - (Required) The start time in `HH:mm:ss` format.
-* `duration_seconds` - (Required) The duration of the restriction in `seconds`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the schedule
-
-## Import
-
-Schedules can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_schedule.main PLBP09X
-```
diff --git a/website/source/docs/providers/pagerduty/r/service.html.markdown b/website/source/docs/providers/pagerduty/r/service.html.markdown
deleted file mode 100644
index 7f3fd9da0..000000000
--- a/website/source/docs/providers/pagerduty/r/service.html.markdown
+++ /dev/null
@@ -1,136 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_service"
-sidebar_current: "docs-pagerduty-resource-service"
-description: |-
- Creates and manages a service in PagerDuty.
----
-
-# pagerduty\_service
-
-A [service](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Services/get_services) represents something you monitor (like a web service, email service, or database service). It is a container for related incidents that associates them with escalation policies.
-
-
-## Example Usage
-
-```hcl
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-
-resource "pagerduty_escalation_policy" "foo" {
- name = "Engineering Escalation Policy"
- num_loops = 2
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "user"
- id = "${pagerduty_user.example.id}"
- }
- }
-}
-
-resource "pagerduty_service" "example" {
- name = "My Web App"
- auto_resolve_timeout = 14400
- acknowledgement_timeout = 600
- escalation_policy = "${pagerduty_escalation_policy.example.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the service.
- * `description` - (Optional) A human-friendly description of the service.
- If not set, a placeholder of "Managed by Terraform" will be set.
- * `auto_resolve_timeout` - (Optional) Time in seconds that an incident is automatically resolved if left open for that long. Disabled if not set.
- * `acknowledgement_timeout` - (Optional) Time in seconds that an incident changes to the Triggered State after being Acknowledged. Disabled if not set.
- * `escalation_policy` - (Required) The escalation policy used by this service.
-
-You may specify one optional `incident_urgency_rule` block configuring what urgencies to use.
-Your PagerDuty account must have the `urgencies` ability to assign an incident urgency rule.
-The block contains the following arguments.
-
- * `type` - The type of incident urgency: `constant` or `use_support_hours` (when depending on specific support hours; see `support_hours`).
- * `during_support_hours` - (Optional) Incidents' urgency during support hours.
- * `outside_support_hours` - (Optional) Incidents' urgency outside of support hours.
-
-When using `type = "use_support_hours"` in `incident_urgency_rule` you have to specify exactly one otherwise optional `support_hours` block.
-Changes to `support_hours` necessitate re-creating the service resource. Account must have the `service_support_hours` ability to assign support hours.
-The block contains the following arguments.
-
- * `type` - The type of support hours. Can be `fixed_time_per_day`.
- * `time_zone` - The time zone for the support hours.
- * `days_of_week` - Array of days of week as integers.
- * `start_time` - The support hours' starting time of day.
- * `end_time` - The support hours' ending time of day.
-
-When using `type = "use_support_hours"` in the `incident_urgency_rule` block you have to also specify `scheduled_actions` for the service. Otherwise `scheduled_actions` is optional. Changes necessitate re-creating the service resource.
-
- * `type` - The type of scheduled action. Currently, this must be set to `urgency_change`.
- * `at` - Represents when scheduled action will occur.
- * `name` - Designates either the start or the end of the scheduled action. Can be `support_hours_start` or `support_hours_end`.
-
-Below is an example for a `pagerduty_service` resource with `incident_urgency_rules` with `type = "use_support_hours"`, `support_hours` and a default `scheduled_action` as well.
-
-```hcl
-resource "pagerduty_service" "foo" {
- name = "bar"
- description = "bar bar bar"
- auto_resolve_timeout = 3600
- acknowledgement_timeout = 3600
- escalation_policy = "${pagerduty_escalation_policy.foo.id}"
-
- incident_urgency_rule {
- type = "use_support_hours"
-
- during_support_hours {
- type = "constant"
- urgency = "high"
- }
-
- outside_support_hours {
- type = "constant"
- urgency = "low"
- }
- }
-
- support_hours {
- type = "fixed_time_per_day"
- time_zone = "America/Lima"
- start_time = "09:00:00"
- end_time = "17:00:00"
- days_of_week = [1, 2, 3, 4, 5]
- }
-
- scheduled_actions {
- type = "urgency_change"
- to_urgency = "high"
-
- at {
- type = "named_time"
- name = "support_hours_start"
- }
- }
-}
-```
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the service.
-
-## Import
-
-Services can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_service.main PLBP09X
-```
diff --git a/website/source/docs/providers/pagerduty/r/service_integration.html.markdown b/website/source/docs/providers/pagerduty/r/service_integration.html.markdown
deleted file mode 100644
index ad4cd3046..000000000
--- a/website/source/docs/providers/pagerduty/r/service_integration.html.markdown
+++ /dev/null
@@ -1,98 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_service_integration"
-sidebar_current: "docs-pagerduty-resource-service-integration"
-description: |-
- Creates and manages a service integration in PagerDuty.
----
-
-# pagerduty\_service_integration
-
-A [service integration](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Services/post_services_id_integrations) is an integration that belongs to a service.
-
-## Example Usage
-
-```hcl
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-
-resource "pagerduty_escalation_policy" "foo" {
- name = "Engineering Escalation Policy"
- num_loops = 2
-
- rule {
- escalation_delay_in_minutes = 10
-
- target {
- type = "user"
- id = "${pagerduty_user.example.id}"
- }
- }
-}
-
-resource "pagerduty_service" "example" {
- name = "My Web App"
- auto_resolve_timeout = 14400
- acknowledgement_timeout = 600
- escalation_policy = "${pagerduty_escalation_policy.example.id}"
-}
-
-resource "pagerduty_service_integration" "example" {
- name = "Generic API Service Integration"
- type = "generic_events_api_inbound_integration"
- service = "${pagerduty_service.example.id}"
-}
-
-data "pagerduty_vendor" "datadog" {
- name = "Datadog"
-}
-
-resource "pagerduty_service_integration" "datadog" {
- name = "${data.pagerduty_vendor.datadog.name}"
- service = "${pagerduty_service.example.id}"
- vendor = "${data.pagerduty_vendor.datadog.id}"
-}
-
-data "pagerduty_vendor" "cloudwatch" {
- name = "Cloudwatch"
-}
-
-resource "pagerduty_service_integration" "cloudwatch" {
- name = "${data.pagerduty_vendor.cloudwatch.name}"
- service = "${pagerduty_service.example.id}"
- vendor = "${data.pagerduty_vendor.cloudwatch.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Optional) The name of the service integration.
- * `type` - (Optional) The service type. Can be:
- `aws_cloudwatch_inbound_integration`,
- `cloudkick_inbound_integration`,
- `event_transformer_api_inbound_integration`,
- `generic_email_inbound_integration`,
- `generic_events_api_inbound_integration`,
- `keynote_inbound_integration`,
- `nagios_inbound_integration`,
- `pingdom_inbound_integration` or `sql_monitor_inbound_integration`.
-
- **Note:** This is meant for **generic** service integrations.
- To integrate with a **vendor** (e.g Datadog or Amazon Cloudwatch) use the `vendor` field instead.
-
- * `service` - (Optional) The ID of the service the integration should belong to.
- * `vendor` - (Optional) The ID of the vendor the integration should integrate with (e.g Datadog or Amazon Cloudwatch).
-
- **Note:** You can use the `pagerduty_vendor` data source to locate the appropriate vendor ID.
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the service integration.
- * `integration_key` - This is the unique key used to route events to this integration when received via the PagerDuty Events API.
- * `integration_email` - This is the unique fully-qualified email address used for routing emails to this integration for processing.
diff --git a/website/source/docs/providers/pagerduty/r/team.html.markdown b/website/source/docs/providers/pagerduty/r/team.html.markdown
deleted file mode 100644
index 821d8f777..000000000
--- a/website/source/docs/providers/pagerduty/r/team.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_team"
-sidebar_current: "docs-pagerduty-resource-team"
-description: |-
- Creates and manages a team in PagerDuty.
----
-
-# pagerduty\_team
-
-A [team](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Teams/get_teams) is a collection of users and escalation policies that represent a group of people within an organization.
-
-The account must have the `teams` ability to use the following resource.
-
-## Example Usage
-
-```hcl
-resource "pagerduty_team" "example" {
- name = "Engineering"
- description = "All engineering"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the group.
- * `description` - (Optional) A human-friendly description of the team.
- If not set, a placeholder of "Managed by Terraform" will be set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the team.
-
-## Import
-
-Teams can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_team.main PLBP09X
-```
diff --git a/website/source/docs/providers/pagerduty/r/user.html.markdown b/website/source/docs/providers/pagerduty/r/user.html.markdown
deleted file mode 100644
index d4cf11214..000000000
--- a/website/source/docs/providers/pagerduty/r/user.html.markdown
+++ /dev/null
@@ -1,57 +0,0 @@
----
-layout: "pagerduty"
-page_title: "PagerDuty: pagerduty_user"
-sidebar_current: "docs-pagerduty-resource-user"
-description: |-
- Creates and manages a user in PagerDuty.
----
-
-# pagerduty\_user
-
-A [user](https://v2.developer.pagerduty.com/v2/page/api-reference#!/Users/get_users) is a member of a PagerDuty account that has the ability to interact with incidents and other data on the account.
-
-
-## Example Usage
-
-```hcl
-resource "pagerduty_team" "example" {
- name = "Engineering"
- description = "All engineering"
-}
-
-resource "pagerduty_user" "example" {
- name = "Earline Greenholt"
- email = "125.greenholt.earline@graham.name"
- teams = ["${pagerduty_team.example.id}"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
- * `name` - (Required) The name of the user.
- * `email` - (Required) The user's email address.
- * `color` - (Optional) The schedule color for the user.
- * `role` - (Optional) The user role. Account must have the `read_only_users` ability to set a user as a `read_only_user`. Can be `admin`, `limited_user`, `owner`, `read_only_user`, `team_responder` or `user`
- * `job_title` - (Optional) The user's title.
- * `teams` - (Optional) A list of teams the user should belong to.
- * `description` - (Optional) A human-friendly description of the user.
- If not set, a placeholder of "Managed by Terraform" will be set.
-
-## Attributes Reference
-
-The following attributes are exported:
-
- * `id` - The ID of the user.
- * `avatar_url` - The URL of the user's avatar.
- * `html_url` - URL at which the entity is uniquely displayed in the Web app
- * `invitation_sent` - If true, the user has an outstanding invitation.
-
-## Import
-
-Users can be imported using the `id`, e.g.
-
-```
-$ terraform import pagerduty_user.main PLBP09X
-```
diff --git a/website/source/docs/providers/postgresql/index.html.markdown b/website/source/docs/providers/postgresql/index.html.markdown
deleted file mode 100644
index b586ba0b5..000000000
--- a/website/source/docs/providers/postgresql/index.html.markdown
+++ /dev/null
@@ -1,76 +0,0 @@
----
-layout: "postgresql"
-page_title: "Provider: PostgreSQL"
-sidebar_current: "docs-postgresql-index"
-description: |-
- A provider for PostgreSQL Server.
----
-
-# PostgreSQL Provider
-
-The PostgreSQL provider gives the ability to deploy and configure resources in a PostgreSQL server.
-
-Use the navigation to the left to read about the available resources.
-
-## Usage
-
-```hcl
-provider "postgresql" {
- host = "postgres_server_ip"
- port = 5432
- database = "postgres"
- username = "postgres_user"
- password = "postgres_password"
- sslmode = "require"
- connect_timeout = 15
-}
-```
-
-Configuring multiple servers can be done by specifying the alias option.
-
-```hcl
-provider "postgresql" {
- alias = "pg1"
- host = "postgres_server_ip1"
- username = "postgres_user1"
- password = "postgres_password1"
-}
-
-provider "postgresql" {
- alias = "pg2"
- host = "postgres_server_ip2"
- username = "postgres_user2"
- password = "postgres_password2"
-}
-
-resource "postgresql_database" "my_db1" {
- provider = "postgresql.pg1"
- name = "my_db1"
-}
-
-resource "postgresql_database" "my_db2" {
- provider = "postgresql.pg2"
- name = "my_db2"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `host` - (Required) The address for the postgresql server connection.
-* `port` - (Optional) The port for the postgresql server connection. The default is `5432`.
-* `database` - (Optional) Database to connect to. The default is `postgres`.
-* `username` - (Required) Username for the server connection.
-* `password` - (Optional) Password for the server connection.
-* `sslmode` - (Optional) Set the priority for an SSL connection to the server.
- Valid values for `sslmode` are (note: `prefer` is not supported by Go's
- [`lib/pq`](https://godoc.org/github.com/lib/pq)):
- * disable - No SSL
- * require - Always SSL (the default, also skip verification)
- * verify-ca - Always SSL (verify that the certificate presented by the server was signed by a trusted CA)
- * verify-full - Always SSL (verify that the certification presented by the server was signed by a trusted CA and the server host name matches the one in the certificate)
- Additional information on the options and their implications can be seen
- [in the `libpq(3)` SSL guide](http://www.postgresql.org/docs/current/static/libpq-ssl.html#LIBPQ-SSL-PROTECTION).
-* `connect_timeout` - (Optional) Maximum wait for connection, in seconds. The
- default is `180s`. Zero or not specified means wait indefinitely.
diff --git a/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown
deleted file mode 100644
index 5110e0f50..000000000
--- a/website/source/docs/providers/postgresql/r/postgresql_database.html.markdown
+++ /dev/null
@@ -1,111 +0,0 @@
----
-layout: "postgresql"
-page_title: "PostgreSQL: postgresql_database"
-sidebar_current: "docs-postgresql-resource-postgresql_database"
-description: |-
- Creates and manages a database on a PostgreSQL server.
----
-
-# postgresql\_database
-
-The ``postgresql_database`` resource creates and manages [database
-objects](https://www.postgresql.org/docs/current/static/managing-databases.html)
-within a PostgreSQL server instance.
-
-
-## Usage
-
-```hcl
-resource "postgresql_database" "my_db" {
- name = "my_db"
- owner = "my_role"
- template = "template0"
- lc_collate = "C"
- connection_limit = -1
- allow_connections = true
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the database. Must be unique on the PostgreSQL
- server instance where it is configured.
-
-* `owner` - (Optional) The role name of the user who will own the database, or
- `DEFAULT` to use the default (namely, the user executing the command). To
- create a database owned by another role or to change the owner of an existing
- database, you must be a direct or indirect member of the specified role, or
- the username in the provider must be a superuser.
-
-* `tablespace_name` - (Optional) The name of the tablespace that will be
- associated with the database, or `DEFAULT` to use the template database's
- tablespace. This tablespace will be the default tablespace used for objects
- created in this database.
-
-* `connection_limit` - (Optional) How many concurrent connections can be
- established to this database. `-1` (the default) means no limit.
-
-* `allow_connections` - (Optional) If `false` then no one can connect to this
- database. The default is `true`, allowing connections (except as restricted by
- other mechanisms, such as `GRANT` or `REVOKE CONNECT`).
-
-* `is_template` - (Optional) If `true`, then this database can be cloned by any
- user with `CREATEDB` privileges; if `false` (the default), then only
- superusers or the owner of the database can clone it.
-
-* `template` - (Optional) The name of the template database from which to create
- the database, or `DEFAULT` to use the default template (`template0`). NOTE:
- the default in Terraform is `template0`, not `template1`. Changing this value
- will force the creation of a new resource as this value can only be changed
- when a database is created.
-
-* `encoding` - (Optional) Character set encoding to use in the database.
- Specify a string constant (e.g. `UTF8` or `SQL_ASCII`), or an integer encoding
- number. If unset or set to an empty string the default encoding is set to
- `UTF8`. If set to `DEFAULT` Terraform will use the same encoding as the
- template database. Changing this value will force the creation of a new
- resource as this value can only be changed when a database is created.
-
-* `lc_collate` - (Optional) Collation order (`LC_COLLATE`) to use in the
- database. This affects the sort order applied to strings, e.g. in queries
- with `ORDER BY`, as well as the order used in indexes on text columns. If
- unset or set to an empty string the default collation is set to `C`. If set
- to `DEFAULT` Terraform will use the same collation order as the specified
- `template` database. Changing this value will force the creation of a new
- resource as this value can only be changed when a database is created.
-
-* `lc_ctype` - (Optional) Character classification (`LC_CTYPE`) to use in the
- database. This affects the categorization of characters, e.g. lower, upper and
- digit. If unset or set to an empty string the default character classification
- is set to `C`. If set to `DEFAULT` Terraform will use the character
- classification of the specified `template` database. Changing this value will
- force the creation of a new resource as this value can only be changed when a
- database is created.
-
-## Import Example
-
-`postgresql_database` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-provider "postgresql" {
- alias = "admindb"
-}
-
-resource "postgresql_database" "db1" {
- provider = "postgresql.admindb"
-
- name = "testdb1"
-}
-```
-
-It is possible to import a `postgresql_database` resource with the following
-command:
-
-```
-$ terraform import postgresql_database.db1 testdb1
-```
-
-Where `testdb1` is the name of the database to import and
-`postgresql_database.db1` is the name of the resource whose state will be
-populated as a result of the command.
diff --git a/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown
deleted file mode 100644
index 64f7cafc0..000000000
--- a/website/source/docs/providers/postgresql/r/postgresql_extension.html.markdown
+++ /dev/null
@@ -1,25 +0,0 @@
----
-layout: "postgresql"
-page_title: "PostgreSQL: postgresql_extension"
-sidebar_current: "docs-postgresql-resource-postgresql_extension"
-description: |-
- Creates and manages an extension on a PostgreSQL server.
----
-
-# postgresql\_extension
-
-The ``postgresql_extension`` resource creates and manages an extension on a PostgreSQL
-server.
-
-
-## Usage
-
-```hcl
-resource "postgresql_extension" "my_extension" {
- name = "pg_trgm"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the extension.
diff --git a/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown
deleted file mode 100644
index 78f52e80e..000000000
--- a/website/source/docs/providers/postgresql/r/postgresql_role.html.markdown
+++ /dev/null
@@ -1,140 +0,0 @@
----
-layout: "postgresql"
-page_title: "PostgreSQL: postgresql_role"
-sidebar_current: "docs-postgresql-resource-postgresql_role"
-description: |-
- Creates and manages a role on a PostgreSQL server.
----
-
-# postgresql\_role
-
-The ``postgresql_role`` resource creates and manages a role on a PostgreSQL
-server.
-
-When a ``postgresql_role`` resource is removed, the PostgreSQL ROLE will
-automatically run a [`REASSIGN
-OWNED`](https://www.postgresql.org/docs/current/static/sql-reassign-owned.html)
-and [`DROP
-OWNED`](https://www.postgresql.org/docs/current/static/sql-drop-owned.html) to
-the `CURRENT_USER` (normally the connected user for the provider). If the
-specified PostgreSQL ROLE owns objects in multiple PostgreSQL databases in the
-same PostgreSQL Cluster, one PostgreSQL provider per database must be created
-and all but the final ``postgresql_role`` must specify a `skip_drop_role`.
-
-~> **Note:** All arguments including role name and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Usage
-
-```hcl
-resource "postgresql_role" "my_role" {
- name = "my_role"
- login = true
- password = "mypass"
-}
-
-resource "postgresql_role" "my_replication_role" {
- name = "replication_role"
- replication = true
- login = true
- connection_limit = 5
- password = "md5c98cbfeb6a347a47eb8e96cfb4c4b890"
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the role. Must be unique on the PostgreSQL
- server instance where it is configured.
-
-* `superuser` - (Optional) Defines whether the role is a "superuser", and
- therefore can override all access restrictions within the database. Default
- value is `false`.
-
-* `create_database` - (Optional) Defines a role's ability to execute `CREATE
- DATABASE`. Default value is `false`.
-
-* `create_role` - (Optional) Defines a role's ability to execute `CREATE ROLE`.
- A role with this privilege can also alter and drop other roles. Default value
- is `false`.
-
-* `inherit` - (Optional) Defines whether a role "inherits" the privileges of
- roles it is a member of. Default value is `true`.
-
-* `login` - (Optional) Defines whether role is allowed to log in. Roles without
- this attribute are useful for managing database privileges, but are not users
- in the usual sense of the word. Default value is `false`.
-
-* `replication` - (Optional) Defines whether a role is allowed to initiate
- streaming replication or put the system in and out of backup mode. Default
- value is `false`
-
-* `bypass_row_level_security` - (Optional) Defines whether a role bypasses every
- row-level security (RLS) policy. Default value is `false`.
-
-* `connection_limit` - (Optional) If this role can log in, this specifies how
- many concurrent connections the role can establish. `-1` (the default) means no
- limit.
-
-* `encrypted_password` - (Optional) Defines whether the password is stored
- encrypted in the system catalogs. Default value is `true`. NOTE: this value
- is always set (to the conservative and safe value), but may interfere with the
- behavior of
- [PostgreSQL's `password_encryption` setting](https://www.postgresql.org/docs/current/static/runtime-config-connection.html#GUC-PASSWORD-ENCRYPTION).
-
-* `password` - (Optional) Sets the role's password. (A password is only of use
- for roles having the `login` attribute set to true, but you can nonetheless
- define one for roles without it.) Roles without a password explicitly set are
- left alone. If the password is set to the magic value `NULL`, the password
- will always be cleared.
-
-* `valid_until` - (Optional) Defines the date and time after which the role's
- password is no longer valid. Established connections past this `valid_until`
- will have to be manually terminated. This value corresponds to a PostgreSQL
- datetime. If omitted or the magic value `NULL` is used, `valid_until` will be
- set to `infinity`. Default is `NULL`, therefore `infinity`.
-
-* `skip_drop_role` - (Optional) When a PostgreSQL ROLE exists in multiple
- databases and the ROLE is dropped, the
- [cleanup of ownership of objects](https://www.postgresql.org/docs/current/static/role-removal.html)
- in each of the respective databases must occur before the ROLE can be dropped
- from the catalog. Set this option to true when there are multiple databases
- in a PostgreSQL cluster using the same PostgreSQL ROLE for object ownership.
- This is the third and final step taken when removing a ROLE from a database.
-
-* `skip_reassign_owned` - (Optional) When a PostgreSQL ROLE exists in multiple
- databases and the ROLE is dropped, a
- [`REASSIGN OWNED`](https://www.postgresql.org/docs/current/static/sql-reassign-owned.html)
- must be executed on each of the respective databases before the `DROP ROLE`
- can be executed to drop the ROLE from the catalog. This is the first and
- second steps taken when removing a ROLE from a database (the second step being
- an implicit
- [`DROP OWNED`](https://www.postgresql.org/docs/current/static/sql-drop-owned.html)).
-
-## Import Example
-
-`postgresql_role` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-provider "postgresql" {
- alias = "admindb"
-}
-
-resource "postgresql_role" "replication_role" {
- provider = "postgresql.admindb"
-
- name = "replication_name"
-}
-```
-
-It is possible to import a `postgresql_role` resource with the following
-command:
-
-```
-$ terraform import postgresql_role.replication_role replication_name
-```
-
-Where `replication_name` is the name of the role to import and
-`postgresql_role.replication_role` is the name of the resource whose state will
-be populated as a result of the command.
diff --git a/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown b/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown
deleted file mode 100644
index 43a148f23..000000000
--- a/website/source/docs/providers/postgresql/r/postgresql_schema.html.markdown
+++ /dev/null
@@ -1,103 +0,0 @@
----
-layout: "postgresql"
-page_title: "PostgreSQL: postgresql_schema"
-sidebar_current: "docs-postgresql-resource-postgresql_schema"
-description: |-
- Creates and manages a schema within a PostgreSQL database.
----
-
-# postgresql\_schema
-
-The ``postgresql_schema`` resource creates and manages [schema
-objects](https://www.postgresql.org/docs/current/static/ddl-schemas.html) within
-a PostgreSQL database.
-
-
-## Usage
-
-```hcl
-resource "postgresql_role" "app_www" {
- name = "app_www"
-}
-
-resource "postgresql_role" "app_dba" {
- name = "app_dba"
-}
-
-resource "postgresql_role" "app_releng" {
- name = "app_releng"
-}
-
-resource "postgresql_schema" "my_schema" {
- name = "my_schema"
- owner = "postgres"
-
- policy {
- usage = true
- role = "${postgresql_role.app_www.name}"
- }
-
- # app_releng can create new objects in the schema. This is the role that
- # migrations are executed as.
- policy {
- create = true
- usage = true
- role = "${postgresql_role.app_releng.name}"
- }
-
- policy {
- create_with_grant = true
- usage_with_grant = true
- role = "${postgresql_role.app_dba.name}"
- }
-}
-```
-
-## Argument Reference
-
-* `name` - (Required) The name of the schema. Must be unique in the PostgreSQL
- database instance where it is configured.
-* `owner` - (Optional) The ROLE who owns the schema.
-* `policy` - (Optional) Can be specified multiple times for each policy. Each
- policy block supports fields documented below.
-
-The `policy` block supports:
-
-* `create` - (Optional) Should the specified ROLE have CREATE privileges to the specified SCHEMA.
-* `create_with_grant` - (Optional) Should the specified ROLE have CREATE privileges to the specified SCHEMA and the ability to GRANT the CREATE privilege to other ROLEs.
-* `role` - (Optional) The ROLE who is receiving the policy. If this value is empty or not specified it implies the policy is referring to the [`PUBLIC` role](https://www.postgresql.org/docs/current/static/sql-grant.html).
-* `usage` - (Optional) Should the specified ROLE have USAGE privileges to the specified SCHEMA.
-* `usage_with_grant` - (Optional) Should the specified ROLE have USAGE privileges to the specified SCHEMA and the ability to GRANT the USAGE privilege to other ROLEs.
-
-~> **NOTE on `policy`:** The permissions of a role specified in multiple policy blocks are cumulative. For example, if the same role is specified in two different `policy` blocks, each with different permissions (e.g. `create` and `usage_with_grant`, respectively), then the specified role will have both `create` and `usage_with_grant` privileges.
-
-## Import Example
-
-`postgresql_schema` supports importing resources. Supposing the following
-Terraform:
-
-```hcl
-resource "postgresql_schema" "public" {
- name = "public"
-}
-
-resource "postgresql_schema" "schema_foo" {
- name = "my_schema"
- owner = "postgres"
-
- policy {
- usage = true
- }
-}
-```
-
-It is possible to import a `postgresql_schema` resource with the following
-command:
-
-```
-$ terraform import postgresql_schema.schema_foo my_schema
-```
-
-Where `my_schema` is the name of the schema in the PostgreSQL database and
-`postgresql_schema.schema_foo` is the name of the resource whose state will be
-populated as a result of the command.
diff --git a/website/source/docs/providers/powerdns/index.html.markdown b/website/source/docs/providers/powerdns/index.html.markdown
deleted file mode 100644
index f68a644bf..000000000
--- a/website/source/docs/providers/powerdns/index.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "powerdns"
-page_title: "Provider: PowerDNS"
-sidebar_current: "docs-powerdns-index"
-description: |-
- The PowerDNS provider is used to manipulate DNS records supported by PowerDNS server. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# PowerDNS Provider
-
-The PowerDNS provider is used to manipulate DNS records supported by PowerDNS server. The provider needs to be configured
-with the proper credentials before it can be used. It supports both the [legacy API](https://doc.powerdns.com/3/httpapi/api_spec/) and the new [version 1 API](https://doc.powerdns.com/md/httpapi/api_spec/), however resources may need to be configured differently.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the PowerDNS provider
-provider "powerdns" {
- api_key = "${var.pdns_api_key}"
- server_url = "${var.pdns_server_url}"
-}
-
-# Create a record
-resource "powerdns_record" "www" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_key` - (Required) The PowerDNS API key. This can also be specified with `PDNS_API_KEY` environment variable.
-* `server_url` - (Required) The address of PowerDNS server. This can also be specified with `PDNS_SERVER_URL` environment variable.
diff --git a/website/source/docs/providers/powerdns/r/record.html.markdown b/website/source/docs/providers/powerdns/r/record.html.markdown
deleted file mode 100644
index 22cf3b433..000000000
--- a/website/source/docs/providers/powerdns/r/record.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "powerdns"
-page_title: "PowerDNS: powerdns_record"
-sidebar_current: "docs-powerdns-resource-record"
-description: |-
- Provides a PowerDNS record resource.
----
-
-# powerdns\_record
-
-Provides a PowerDNS record resource.
-
-## Example Usage
-
-Note that PowerDNS internally lowercases certain records (e.g. CNAME and AAAA), which can lead to resources being marked for a change in every single plan.
-
-For the v1 API (PowerDNS version 4):
-
-```hcl
-# Add a record to the zone
-resource "powerdns_record" "foobar" {
- zone = "example.com."
- name = "www.example.com"
- type = "A"
- ttl = 300
- records = ["192.168.0.11"]
-}
-```
-
-For the legacy API (PowerDNS version 3.4):
-
-```hcl
-# Add a record to the zone
-resource "powerdns_record" "foobar" {
- zone = "example.com"
- name = "www.example.com"
- type = "A"
- ttl = 300
- records = ["192.168.0.11"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) The name of zone to contain this record.
-* `name` - (Required) The name of the record.
-* `type` - (Required) The record type.
-* `ttl` - (Required) The TTL of the record.
-* `records` - (Required) A string list of records.
-
diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown
deleted file mode 100644
index 05375bb78..000000000
--- a/website/source/docs/providers/profitbricks/d/profitbricks_datacenter.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks : profitbricks_datacenter"
-sidebar_current: "docs-profitbricks-datasource-datacenter"
-description: |-
- Get information on ProfitBricks Data Centers
----
-
-# profitbricks\_datacenter
-
-The data centers data source can be used to search for and return an existing Virtual Data Center. You can provide a string for the name and location parameters which will be compared with provisioned Virtual Data Centers. If a single match is found, it will be returned. If your search results in multiple matches, an error will be generated. When this happens, please refine your search string so that it is specific enough to return only one result.
-
-## Example Usage
-
-```hcl
-data "profitbricks_datacenter" "dc_example" {
- name = "test_dc"
- location = "us"
-}
-```
-
-## Argument Reference
-
- * `name` - (Required) Name or part of the name of an existing Virtual Data Center that you want to search for.
- * `location` - (Optional) Id of the existing Virtual Data Center's location.
-
-## Attributes Reference
-
- * `id` - UUID of the Virtual Data Center
diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown
deleted file mode 100644
index 1f604b8c7..000000000
--- a/website/source/docs/providers/profitbricks/d/profitbricks_image.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks : profitbricks_image"
-sidebar_current: "docs-profitbricks-datasource-image"
-description: |-
- Get information on ProfitBricks Images
----
-
-# profitbricks\_image
-
-The images data source can be used to search for and return an existing image which can then be used to provision a server.
-
-## Example Usage
-
-```hcl
-data "profitbricks_image" "image_example" {
- name = "Ubuntu"
- type = "HDD"
- version = "14"
- location = "location_id"
-}
-```
-
-## Argument Reference
-
- * `name` - (Required) Name or part of the name of an existing image that you want to search for.
- * `version` - (Optional) Version of the image (see details below).
- * `location` - (Optional) Id of the existing image's location.
- * `type` - (Optional) The image type, HDD or CD-ROM.
-
-If both "name" and "version" are provided the plugin will concatenate the two strings in this format [name]-[version].
-
-## Attributes Reference
-
- * `id` - UUID of the image
diff --git a/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown b/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown
deleted file mode 100644
index e50f18fc1..000000000
--- a/website/source/docs/providers/profitbricks/d/profitbricks_location.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks : profitbricks_location"
-sidebar_current: "docs-profitbricks-datasource-location"
-description: |-
- Get information on ProfitBricks Locations
----
-
-# profitbricks\_location
-
-The locations data source can be used to search for and return an existing location which can then be used elsewhere in the configuration.
-
-## Example Usage
-
-```hcl
-data "profitbricks_location" "loc1" {
- name = "karlsruhe"
- feature = "SSD"
-}
-```
-
-## Argument Reference
-
- * `name` - (Required) Name or part of the location name to search for.
- * `feature` - (Optional) A desired feature that the location must be able to provide.
-
-## Attributes Reference
-
- * `id` - UUID of the location
diff --git a/website/source/docs/providers/profitbricks/index.html.markdown b/website/source/docs/providers/profitbricks/index.html.markdown
deleted file mode 100644
index 22bd5b54d..000000000
--- a/website/source/docs/providers/profitbricks/index.html.markdown
+++ /dev/null
@@ -1,63 +0,0 @@
----
-layout: "profitbricks"
-page_title: "Provider: ProfitBricks"
-sidebar_current: "docs-profitbricks-index"
-description: |-
- A provider for ProfitBricks.
----
-
-# ProfitBricks Provider
-
-The ProfitBricks provider gives the ability to deploy and configure resources using ProfitBricks Cloud API.
-
-Use the navigation to the left to read about the available resources.
-
-
-## Usage
-
-The provider needs to be configured with proper credentials before it can be used.
-
-
-```hcl
-$ export PROFITBRICKS_USERNAME="profitbricks_username"
-$ export PROFITBRICKS_PASSWORD="profitbricks_password"
-$ export PROFITBRICKS_API_URL="profitbricks_rest_url"
-```
-
-Or you can provide your credentials like this:
-
-
-The credentials provided in `.tf` file will override credentials in the environment variables.
-
-## Example Usage
-
-
-```hcl
-provider "profitbricks" {
- username = "profitbricks_username"
- password = "profitbricks_password"
- endpoint = "profitbricks_rest_url"
- retries = 100
-}
-
-resource "profitbricks_datacenter" "main" {
- # ...
-}
-```
-
-
-## Configuration Reference
-
-The following arguments are supported:
-
-* `username` - (Required) If omitted, the `PROFITBRICKS_USERNAME` environment variable is used.
-
-* `password` - (Required) If omitted, the `PROFITBRICKS_PASSWORD` environment variable is used.
-
-* `endpoint` - (Required) If omitted, the `PROFITBRICKS_API_URL` environment variable is used.
-
-* `retries` - (Optional) Number of retries while waiting for a resource to be provisioned. Default value is 50.
-
-
-# Support
-You are welcome to contact us with questions or comments at [ProfitBricks DevOps Central](https://devops.profitbricks.com/).
\ No newline at end of file
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown
deleted file mode 100644
index 12b1612c1..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_datacenter.html.markdown
+++ /dev/null
@@ -1,29 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_datacenter"
-sidebar_current: "docs-profitbricks-resource-datacenter"
-description: |-
- Creates and manages Profitbricks Virtual Data Center.
----
-
-# profitbricks\_datacenter
-
-Manages a Virtual Data Center on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_datacenter" "example" {
- name = "datacenter name"
- location = "us/las"
- description = "datacenter description"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required)[string] The name of the Virtual Data Center.
-* `location` - (Required)[string] The physical location where the data center will be created.
-* `description` - (Optional)[string] Description for the data center.
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown
deleted file mode 100644
index dfb0d0f86..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_firewall.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_firewall"
-sidebar_current: "docs-profitbricks-resource-firewall"
-description: |-
- Creates and manages Firewall Rules.
----
-
-# profitbricks\_firewall
-
-Manages Firewall Rules on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_firewall" "example" {
- datacenter_id = "${profitbricks_datacenter.example.id}"
- server_id = "${profitbricks_server.example.id}"
- nic_id = "${profitbricks_server.example.primary_nic}"
- protocol = "TCP"
- name = "test"
- port_range_start = 1
- port_range_end = 2
-}
-```
-
-#### Argument reference
-
-* `datacenter_id` - (Required)[string]
-* `server_id` - (Required)[string]
-* `nic_id` - (Required)[string]
-* `protocol` - (Required)[string] The protocol for the rule: TCP, UDP, ICMP, ANY.
-* `name` - (Optional)[string] The name of the firewall rule.
-* `source_mac` - (Optional)[string] Only traffic originating from the respective MAC address is allowed. Valid format: aa:bb:cc:dd:ee:ff.
-* `source_ip` - (Optional)[string] Only traffic originating from the respective IPv4 address is allowed.
-* `target_ip` - (Optional)[string] Only traffic directed to the respective IP address of the NIC is allowed.
-* `port_range_start` - (Optional)[string] Defines the start range of the allowed port (from 1 to 65534) if protocol TCP or UDP is chosen.
-* `port_range_end` - (Optional)[string] Defines the end range of the allowed port (from 1 to 65534) if the protocol TCP or UDP is chosen.
-* `icmp_type` - (Optional)[string] Defines the allowed type (from 0 to 254) if the protocol ICMP is chosen.
-* `icmp_code` - (Optional)[string] Defines the allowed code (from 0 to 254) if protocol ICMP is chosen.
-
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown
deleted file mode 100644
index 1729eef8c..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_ipblock.html.markdown
+++ /dev/null
@@ -1,26 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_ipblock"
-sidebar_current: "docs-profitbricks-resource-ipblock"
-description: |-
- Creates and manages IP Block objects.
----
-
-# profitbricks\_ipblock
-
-Manages IP Blocks on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_ipblock" "example" {
- location = "${profitbricks_datacenter.example.location}"
- size = 1
-}
-```
-
-## Argument reference
-
-* `location` - (Required)
-* `size` - (Required)
-
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown
deleted file mode 100644
index 66dd03623..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_lan.html.markdown
+++ /dev/null
@@ -1,26 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_lan"
-sidebar_current: "docs-profitbricks-resource-lan"
-description: |-
- Creates and manages LAN objects.
----
-
-# profitbricks\_lan
-
-Manages LANs on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_lan" "example" {
- datacenter_id = "${profitbricks_datacenter.example.id}"
- public = true
-}
-```
-
-## Argument reference
-
-* `datacenter_id` - (Required) [string]
-* `name` - (Optional) [string] The name of the LAN
-* `public` - (Optional) [Boolean] indicating if the LAN faces the public Internet or not.
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown
deleted file mode 100644
index 11a1657bd..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_loadbalancer.html.markdown
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_loadbalancer"
-sidebar_current: "docs-profitbricks-resource-loadbalancer"
-description: |-
- Creates and manages Load Balancers
----
-
-# profitbricks\_loadbalancer
-
-Manages Load Balancers on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_loadbalancer" "example" {
- datacenter_id = "${profitbricks_datacenter.example.id}"
- nic_id = "${profitbricks_nic.example.id}"
- name = "load balancer name"
- dhcp = true
-}
-```
-
-## Argument reference
-
-* `datacenter_id` - (Required)[string]
-* `nic_id` - (Required)[string]
-* `dhcp` - (Optional) [boolean] Indicates if the load balancer will reserve an IP using DHCP.
-* `ip` - (Optional) [string] IPv4 address of the load balancer.
-
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown
deleted file mode 100644
index e0338f728..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_nic.html.markdown
+++ /dev/null
@@ -1,34 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_nic"
-sidebar_current: "docs-profitbricks-resource-nic"
-description: |-
- Creates and manages Network Interface objects.
----
-
-# profitbricks\_nic
-
-Manages NICs on ProfitBricks
-
-## Example Usage
-
-```hcl
-resource "profitbricks_nic" "example" {
- datacenter_id = "${profitbricks_datacenter.example.id}"
- server_id = "${profitbricks_server.example.id}"
- lan = 2
- dhcp = true
- ip = "${profitbricks_ipblock.example.ip}"
-}
-```
-
-## Argument reference
-
-* `datacenter_id` - (Required)[string][1](#myfootnote1)
-* `server_id` - (Required)[string][1](#myfootnote1)
-* `lan` - (Required) [integer] The LAN ID the NIC will sit on.
-* `name` - (Optional) [string] The name of the LAN.
-* `dhcp` - (Optional) [boolean]
-* `ip` - (Optional) [string] IP assigned to the NIC.
-* `firewall_active` - (Optional) [boolean] If this resource is set to true and is nested under a server resource firewall, with open SSH port, resource must be nested under the nic.
-* `nat` - (Optional) [boolean] Boolean value indicating if the private IP address has outbound access to the public internet.
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown
deleted file mode 100644
index 68ef48d3f..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_server.html.markdown
+++ /dev/null
@@ -1,62 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_server"
-sidebar_current: "docs-profitbricks-resource-server"
-description: |-
- Creates and manages ProfitBricks Server objects.
----
-
-# profitbricks\_server
-
-Manages Servers on ProfitBricks
-
-## Example Usage
-
-This resource will create an operational server. After this section completes, the provisioner can be called.
-
-```hcl
-resource "profitbricks_server" "example" {
- name = "server"
- datacenter_id = "${profitbricks_datacenter.example.id}"
- cores = 1
- ram = 1024
- availability_zone = "ZONE_1"
- cpu_family = "AMD_OPTERON"
-
- volume {
- name = "new"
- image_name = "${var.ubuntu}"
- size = 5
- disk_type = "SSD"
- ssh_key_path = "${var.private_key_path}"
- image_password = "test1234"
- }
-
- nic {
- lan = "${profitbricks_lan.example.id}"
- dhcp = true
- ip = "${profitbricks_ipblock.example.ip}"
- firewall_active = true
-
- firewall {
- protocol = "TCP"
- name = "SSH"
- port_range_start = 22
- port_range_end = 22
- }
- }
-}
-```
-
-## Argument reference
-
-* `name` - (Required) [string] The name of the server.
-* `datacenter_id` - (Required)[string]
-* `cores` - (Required)[integer] Number of server cores.
-* `ram` - (Required)[integer] The amount of memory for the server in MB.
-* `availability_zone` - (Optional)[string] The availability zone in which the server should exist.
-* `licence_type` - (Optional)[string] Sets the OS type of the server.
-* `cpuFamily` - (Optional)[string] Sets the CPU type. "AMD_OPTERON" or "INTEL_XEON". Defaults to "AMD_OPTERON".
-* `volume` - (Required) See Volume section.
-* `nic` - (Required) See NIC section.
-* `firewall` - (Optional) See Firewall Rule section.
diff --git a/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown b/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown
deleted file mode 100644
index fb4ee5f3c..000000000
--- a/website/source/docs/providers/profitbricks/r/profitbricks_volume.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "profitbricks"
-page_title: "ProfitBricks: profitbricks_volume"
-sidebar_current: "docs-profitbricks-resource-volume"
-description: |-
- Creates and manages ProfitBricks Volume objects.
----
-
-# profitbricks\_volume
-
-Manages Volumes on ProfitBricks
-
-## Example Usage
-
-A primary volume will be created with the server. If there is a need for additional volume, this resource handles it.
-
-```hcl
-resource "profitbricks_volume" "example" {
- datacenter_id = "${profitbricks_datacenter.example.id}"
- server_id = "${profitbricks_server.example.id}"
- image_name = "${var.ubuntu}"
- size = 5
- disk_type = "HDD"
- ssh_key_path = "${var.private_key_path}"
- bus = "VIRTIO"
-}
-```
-
-## Argument reference
-
-* `datacenter_id` - (Required) [string] [1](#myfootnote1)
-* `server_id` - (Required)[string] [1](#myfootnote1)
-* `disk_type` - (Required) [string] The volume type, HDD or SSD.
-* `bus` - (Required) [boolean] The bus type of the volume.
-* `size` - (Required)[integer] The size of the volume in GB.
-* `ssh_key_path` - (Required)[list] List of paths to files containing a public SSH key that will be injected into ProfitBricks provided Linux images. Required if `image_password` is not provided.
-* `image_password` - [string] Required if `ssh_key_path` is not provided.
-* `image_name` - [string] The image or snapshot ID. It is required if `licence_type` is not provided.
-* `licence_type` - [string] Required if `image_name` is not provided.
-* `name` - (Optional) [string] The name of the volume.
-* `availability_zone` - (Optional) [string] The storage availability zone assigned to the volume. AUTO, ZONE_1, ZONE_2, or ZONE_3
\ No newline at end of file
diff --git a/website/source/docs/providers/rabbitmq/index.html.markdown b/website/source/docs/providers/rabbitmq/index.html.markdown
deleted file mode 100644
index 11f4d7881..000000000
--- a/website/source/docs/providers/rabbitmq/index.html.markdown
+++ /dev/null
@@ -1,55 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "Provider: RabbitMQ"
-sidebar_current: "docs-rabbitmq-index"
-description: |-
- A provider for a RabbitMQ Server.
----
-
-# RabbitMQ Provider
-
-[RabbitMQ](http://www.rabbitmq.com) is an AMQP message broker server. The
-RabbitMQ provider exposes resources used to manage the configuration of
-resources in a RabbitMQ server.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-The following is a minimal example:
-
-```hcl
-# Configure the RabbitMQ provider
-provider "rabbitmq" {
- endpoint = "http://127.0.0.1"
- username = "guest"
- password = "guest"
-}
-
-# Create a virtual host
-resource "rabbitmq_vhost" "vhost_1" {
- name = "vhost_1"
-}
-```
-
-## Requirements
-
-The RabbitMQ management plugin must be enabled to use this provider. You can
-enable the plugin by doing something similar to:
-
-```
-$ sudo rabbitmq-plugins enable rabbitmq_management
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `endpoint` - (Required) The HTTP URL of the management plugin on the
- RabbitMQ server. The RabbitMQ management plugin *must* be enabled in order
-  to use this provider. _Note_: This is not the IP address or hostname of the
- RabbitMQ server that you would use to access RabbitMQ directly.
-* `username` - (Required) Username to use to authenticate with the server.
-* `password` - (Optional) Password for the given user.
-* `insecure` - (Optional) Trust self-signed certificates.
-* `cacert_file` - (Optional) The path to a custom CA / intermediate certificate.
diff --git a/website/source/docs/providers/rabbitmq/r/binding.html.markdown b/website/source/docs/providers/rabbitmq/r/binding.html.markdown
deleted file mode 100644
index da797065e..000000000
--- a/website/source/docs/providers/rabbitmq/r/binding.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_binding"
-sidebar_current: "docs-rabbitmq-resource-binding"
-description: |-
- Creates and manages a binding on a RabbitMQ server.
----
-
-# rabbitmq\_binding
-
-The ``rabbitmq_binding`` resource creates and manages a binding relationship
-between a queue and an exchange.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "test" {
- name = "test"
-}
-
-resource "rabbitmq_permissions" "guest" {
- user = "guest"
- vhost = "${rabbitmq_vhost.test.name}"
-
- permissions {
- configure = ".*"
- write = ".*"
- read = ".*"
- }
-}
-
-resource "rabbitmq_exchange" "test" {
- name = "test"
- vhost = "${rabbitmq_permissions.guest.vhost}"
-
- settings {
- type = "fanout"
- durable = false
- auto_delete = true
- }
-}
-
-resource "rabbitmq_queue" "test" {
- name = "test"
- vhost = "${rabbitmq_permissions.guest.vhost}"
-
- settings {
- durable = true
- auto_delete = false
- }
-}
-
-resource "rabbitmq_binding" "test" {
- source = "${rabbitmq_exchange.test.name}"
- vhost = "${rabbitmq_vhost.test.name}"
- destination = "${rabbitmq_queue.test.name}"
- destination_type = "queue"
- routing_key = "#"
- properties_key = "%23"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `source` - (Required) The source exchange.
-
-* `vhost` - (Required) The vhost to create the resource in.
-
-* `destination` - (Required) The destination queue or exchange.
-
-* `destination_type` - (Required) The type of destination (queue or exchange).
-
-* `properties_key` - (Required) A unique key to refer to the binding.
-
-* `routing_key` - (Optional) A routing key for the binding.
-
-* `arguments` - (Optional) Additional key/value arguments for the binding.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Bindings can be imported using the `id` which is composed of
- `vhost/source/destination/destination_type/properties_key`. E.g.
-
-```
-$ terraform import rabbitmq_binding.test test/test/test/queue/%23
-```
diff --git a/website/source/docs/providers/rabbitmq/r/exchange.html.markdown b/website/source/docs/providers/rabbitmq/r/exchange.html.markdown
deleted file mode 100644
index bb5fc016c..000000000
--- a/website/source/docs/providers/rabbitmq/r/exchange.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_exchange"
-sidebar_current: "docs-rabbitmq-resource-exchange"
-description: |-
- Creates and manages an exchange on a RabbitMQ server.
----
-
-# rabbitmq\_exchange
-
-The ``rabbitmq_exchange`` resource creates and manages an exchange.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "test" {
- name = "test"
-}
-
-resource "rabbitmq_permissions" "guest" {
- user = "guest"
- vhost = "${rabbitmq_vhost.test.name}"
-
- permissions {
- configure = ".*"
- write = ".*"
- read = ".*"
- }
-}
-
-resource "rabbitmq_exchange" "test" {
- name = "test"
- vhost = "${rabbitmq_permissions.guest.vhost}"
-
- settings {
- type = "fanout"
- durable = false
- auto_delete = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the exchange.
-
-* `vhost` - (Required) The vhost to create the resource in.
-
-* `settings` - (Required) The settings of the exchange. The structure is
- described below.
-
-The `settings` block supports:
-
-* `type` - (Required) The type of exchange.
-
-* `durable` - (Optional) Whether the exchange survives server restarts.
- Defaults to `false`.
-
-* `auto_delete` - (Optional) Whether the exchange will self-delete when all
- queues have finished using it.
-
-* `arguments` - (Optional) Additional key/value settings for the exchange.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Exchanges can be imported using the `id` which is composed of `name@vhost`.
-E.g.
-
-```
-terraform import rabbitmq_exchange.test test@vhost
-```
diff --git a/website/source/docs/providers/rabbitmq/r/permissions.html.markdown b/website/source/docs/providers/rabbitmq/r/permissions.html.markdown
deleted file mode 100644
index f30703547..000000000
--- a/website/source/docs/providers/rabbitmq/r/permissions.html.markdown
+++ /dev/null
@@ -1,67 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_permissions"
-sidebar_current: "docs-rabbitmq-resource-permissions"
-description: |-
- Creates and manages a user's permissions on a RabbitMQ server.
----
-
-# rabbitmq\_permissions
-
-The ``rabbitmq_permissions`` resource creates and manages a user's set of
-permissions.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "test" {
- name = "test"
-}
-
-resource "rabbitmq_user" "test" {
- name = "mctest"
- password = "foobar"
- tags = ["administrator"]
-}
-
-resource "rabbitmq_permissions" "test" {
- user = "${rabbitmq_user.test.name}"
- vhost = "${rabbitmq_vhost.test.name}"
-
- permissions {
- configure = ".*"
- write = ".*"
- read = ".*"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `user` - (Required) The user to apply the permissions to.
-
-* `vhost` - (Required) The vhost to create the resource in.
-
-* `permissions` - (Required) The settings of the permissions. The structure is
- described below.
-
-The `permissions` block supports:
-
-* `configure` - (Required) The "configure" ACL.
-* `write` - (Required) The "write" ACL.
-* `read` - (Required) The "read" ACL.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Permissions can be imported using the `id` which is composed of `user@vhost`.
-E.g.
-
-```
-terraform import rabbitmq_permissions.test user@vhost
-```
diff --git a/website/source/docs/providers/rabbitmq/r/policy.html.markdown b/website/source/docs/providers/rabbitmq/r/policy.html.markdown
deleted file mode 100644
index 951c8ac42..000000000
--- a/website/source/docs/providers/rabbitmq/r/policy.html.markdown
+++ /dev/null
@@ -1,78 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_policy"
-sidebar_current: "docs-rabbitmq-resource-policy"
-description: |-
- Creates and manages a policy on a RabbitMQ server.
----
-
-# rabbitmq\_policy
-
-The ``rabbitmq_policy`` resource creates and manages policies for exchanges
-and queues.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "test" {
- name = "test"
-}
-
-resource "rabbitmq_permissions" "guest" {
- user = "guest"
- vhost = "${rabbitmq_vhost.test.name}"
-
- permissions {
- configure = ".*"
- write = ".*"
- read = ".*"
- }
-}
-
-resource "rabbitmq_policy" "test" {
- name = "test"
- vhost = "${rabbitmq_permissions.guest.vhost}"
-
- policy {
- pattern = ".*"
- priority = 0
- apply_to = "all"
-
- definition {
- ha-mode = "all"
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the policy.
-
-* `vhost` - (Required) The vhost to create the resource in.
-
-* `policy` - (Required) The settings of the policy. The structure is
- described below.
-
-The `policy` block supports:
-
-* `pattern` - (Required) A pattern to match an exchange or queue name.
-* `priority` - (Required) The policy with the greater priority is applied first.
-* `apply_to` - (Required) Can either be "exchange", "queues", or "all".
-* `definition` - (Required) Key/value pairs of the policy definition. See the
- RabbitMQ documentation for definition references and examples.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Policies can be imported using the `id` which is composed of `name@vhost`.
-E.g.
-
-```
-terraform import rabbitmq_policy.test name@vhost
-```
diff --git a/website/source/docs/providers/rabbitmq/r/queue.html.markdown b/website/source/docs/providers/rabbitmq/r/queue.html.markdown
deleted file mode 100644
index 44abb50e3..000000000
--- a/website/source/docs/providers/rabbitmq/r/queue.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_queue"
-sidebar_current: "docs-rabbitmq-resource-queue"
-description: |-
- Creates and manages a queue on a RabbitMQ server.
----
-
-# rabbitmq\_queue
-
-The ``rabbitmq_queue`` resource creates and manages a queue.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "test" {
- name = "test"
-}
-
-resource "rabbitmq_permissions" "guest" {
- user = "guest"
- vhost = "${rabbitmq_vhost.test.name}"
-
- permissions {
- configure = ".*"
- write = ".*"
- read = ".*"
- }
-}
-
-resource "rabbitmq_queue" "test" {
- name = "test"
- vhost = "${rabbitmq_permissions.guest.vhost}"
-
- settings {
- durable = false
- auto_delete = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the queue.
-
-* `vhost` - (Required) The vhost to create the resource in.
-
-* `settings` - (Required) The settings of the queue. The structure is
- described below.
-
-The `settings` block supports:
-
-* `durable` - (Optional) Whether the queue survives server restarts.
- Defaults to `false`.
-
-* `auto_delete` - (Optional) Whether the queue will self-delete when all
- consumers have unsubscribed.
-
-* `arguments` - (Optional) Additional key/value settings for the queue.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Queues can be imported using the `id` which is composed of `name@vhost`. E.g.
-
-```
-terraform import rabbitmq_queue.test name@vhost
-```
diff --git a/website/source/docs/providers/rabbitmq/r/user.html.markdown b/website/source/docs/providers/rabbitmq/r/user.html.markdown
deleted file mode 100644
index f62db774e..000000000
--- a/website/source/docs/providers/rabbitmq/r/user.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_user"
-sidebar_current: "docs-rabbitmq-resource-user"
-description: |-
- Creates and manages a user on a RabbitMQ server.
----
-
-# rabbitmq\_user
-
-The ``rabbitmq_user`` resource creates and manages a user.
-
-~> **Note:** All arguments including username and password will be stored in the raw state as plain-text.
-[Read more about sensitive data in state](/docs/state/sensitive-data.html).
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_user" "test" {
- name = "mctest"
- password = "foobar"
- tags = ["administrator", "management"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the user.
-
-* `password` - (Required) The password of the user. The value of this argument
- is plain-text so make sure to secure where this is defined.
-
-* `tags` - (Optional) Which permission model to apply to the user. Valid
- options are: management, policymaker, monitoring, and administrator.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Users can be imported using the `name`, e.g.
-
-```
-terraform import rabbitmq_user.test mctest
-```
diff --git a/website/source/docs/providers/rabbitmq/r/vhost.html.markdown b/website/source/docs/providers/rabbitmq/r/vhost.html.markdown
deleted file mode 100644
index fc06d135e..000000000
--- a/website/source/docs/providers/rabbitmq/r/vhost.html.markdown
+++ /dev/null
@@ -1,37 +0,0 @@
----
-layout: "rabbitmq"
-page_title: "RabbitMQ: rabbitmq_vhost"
-sidebar_current: "docs-rabbitmq-resource-vhost"
-description: |-
- Creates and manages a vhost on a RabbitMQ server.
----
-
-# rabbitmq\_vhost
-
-The ``rabbitmq_vhost`` resource creates and manages a vhost.
-
-## Example Usage
-
-```hcl
-resource "rabbitmq_vhost" "my_vhost" {
- name = "my_vhost"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the vhost.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Vhosts can be imported using the `name`, e.g.
-
-```
-terraform import rabbitmq_vhost.my_vhost my_vhost
-```
diff --git a/website/source/docs/providers/rancher/index.html.markdown b/website/source/docs/providers/rancher/index.html.markdown
deleted file mode 100644
index 144fd00c3..000000000
--- a/website/source/docs/providers/rancher/index.html.markdown
+++ /dev/null
@@ -1,33 +0,0 @@
----
-layout: "rancher"
-page_title: "Provider: Rancher"
-sidebar_current: "docs-rancher-index"
-description: |-
- The Rancher provider is used to interact with Rancher container platforms.
----
-
-# Rancher Provider
-
-The Rancher provider is used to interact with the
-resources supported by Rancher. The provider needs to be configured
-with the URL of the Rancher server at minimum and API credentials if
-access control is enabled on the server.
-
-## Example Usage
-
-```hcl
-# Configure the Rancher provider
-provider "rancher" {
- api_url = "http://rancher.my-domain.com:8080"
- access_key = "${var.rancher_access_key}"
- secret_key = "${var.rancher_secret_key}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `api_url` - (Required) Rancher API url. It must be provided, but it can also be sourced from the `RANCHER_URL` environment variable.
-* `access_key` - (Optional) Rancher API access key. It can also be sourced from the `RANCHER_ACCESS_KEY` environment variable.
-* `secret_key` - (Optional) Rancher API secret key. It can also be sourced from the `RANCHER_SECRET_KEY` environment variable.
diff --git a/website/source/docs/providers/rancher/r/certificate.html.markdown b/website/source/docs/providers/rancher/r/certificate.html.markdown
deleted file mode 100644
index bc0d445de..000000000
--- a/website/source/docs/providers/rancher/r/certificate.html.markdown
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_certificate"
-sidebar_current: "docs-rancher-resource-certificate"
-description: |-
- Provides a Rancher Certificate resource. This can be used to create certificates for rancher environments and retrieve their information.
----
-
-# rancher\_certificate
-
-Provides a Rancher Certificate resource. This can be used to create certificates for rancher environments and retrieve their information.
-
-## Example Usage
-
-```hcl
-# Create a new Rancher Certificate
-resource rancher_certificate "foo" {
- name = "foo"
- description = "my foo certificate"
- environment_id = "${rancher_environment.test.id}"
- cert = "${file("server.crt")}"
- key = "${file("server.key")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the registry credential.
-* `description` - (Optional) A registry credential description.
-* `environment_id` - (Required) The ID of the environment to create the certificate for.
-* `cert` - (Required) The certificate content.
-* `cert_chain` - (Optional) The certificate chain.
-* `key` - (Required) The certificate key.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cn` - The certificate CN.
-* `algorithm` - The certificate algorithm.
-* `cert_fingerprint` - The certificate fingerprint.
-* `expires_at` - The certificate expiration date.
-* `issued_at` - The certificate creation date.
-* `issuer` - The certificate issuer.
-* `key_size` - The certificate key size.
-* `serial_number` - The certificate serial number.
-* `subject_alternative_names` - The list of certificate Subject Alternative Names.
-* `version` - The certificate version.
-
-## Import
-
-Certificates can be imported using the Environment and Certificate
-IDs in the format `environment_id/certificate_id`
-
-```
-$ terraform import rancher_certificate.mycert 1sp31/1c605
-```
-
-If the credentials for the Rancher provider have access to the global API,
-then `environment_id` can be omitted e.g.
-
-```
-$ terraform import rancher_certificate.mycert 1c605
-```
diff --git a/website/source/docs/providers/rancher/r/environment.html.md b/website/source/docs/providers/rancher/r/environment.html.md
deleted file mode 100644
index 3afe6d1aa..000000000
--- a/website/source/docs/providers/rancher/r/environment.html.md
+++ /dev/null
@@ -1,64 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_environment"
-sidebar_current: "docs-rancher-resource-environment"
-description: |-
- Provides a Rancher Environment resource. This can be used to create and manage environments on rancher.
----
-
-# rancher\_environment
-
-Provides a Rancher Environment resource. This can be used to create and manage environments on rancher.
-
-## Example Usage
-
-```hcl
-# Create a new Rancher environment
-resource "rancher_environment" "default" {
- name = "staging"
- description = "The staging environment"
- orchestration = "cattle"
-
- member {
- external_id = "650430"
- external_id_type = "github_user"
- role = "owner"
- }
-
- member {
- external_id = "1234"
- external_id_type = "github_team"
- role = "member"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the environment.
-* `description` - (Optional) An environment description.
-* `orchestration` - (Optional) Must be one of **cattle**, **swarm**, **mesos**, **windows** or **kubernetes**. This is a helper for setting the project_template_ids for the included Rancher templates. This will conflict with project_template_id setting.
-* `project_template_id` - (Optional) This can be any valid project template ID. If this is set, then orchestration can not be.
-* `member` - (Optional) Members to add to the environment.
-
-### Member Parameters Reference
-
-A `member` takes three parameters:
-
-* `external_id` - (Required) The external ID of the member.
-* `external_id_type` - (Required) The external ID type of the member.
-* `role` - (Required) The role of the member in the environment.
-
-## Attributes Reference
-
-* `id` - The ID of the environment (ie `1a11`) that can be used in other Terraform resources such as Rancher Stack definitions.
-
-## Import
-
-Environments can be imported using their Rancher API ID, e.g.
-
-```
-$ terraform import rancher_environment.dev 1a15
-```
diff --git a/website/source/docs/providers/rancher/r/host.html.markdown b/website/source/docs/providers/rancher/r/host.html.markdown
deleted file mode 100644
index 382d8ac86..000000000
--- a/website/source/docs/providers/rancher/r/host.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_host"
-sidebar_current: "docs-rancher-resource-host"
-description: |-
- Provides a Rancher Host resource. This can be used to manage and delete hosts on Rancher.
----
-
-# rancher\_host
-
-Provides a Rancher Host resource. This can be used to manage and delete hosts on Rancher.
-
-## Example usage
-
-```hcl
-# Manage an existing Rancher host
-resource rancher_host "foo" {
- name = "foo"
- description = "The foo node"
- environment_id = "1a5"
- hostname = "foo.example.com"
- labels {
- role = "database"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the host.
-* `description` - (Optional) A host description.
-* `environment_id` - (Required) The ID of the environment the host is associated to.
-* `hostname` - (Required) The host name. Used as the primary key to detect the host ID.
-* `labels` - (Optional) A dictionary of labels to apply to the host. Computed internal labels are excluded from that list.
diff --git a/website/source/docs/providers/rancher/r/registration_token.html.markdown b/website/source/docs/providers/rancher/r/registration_token.html.markdown
deleted file mode 100644
index bb2cef0fe..000000000
--- a/website/source/docs/providers/rancher/r/registration_token.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_registration_token"
-sidebar_current: "docs-rancher-resource-registration-token"
-description: |-
- Provides a Rancher Registration Token resource. This can be used to create registration tokens for rancher environments and retrieve their information.
----
-
-# rancher\_registration\_token
-
-Provides a Rancher Registration Token resource. This can be used to create registration tokens for rancher environments and retrieve their information.
-
-## Example Usage
-
-```hcl
-# Create a new Rancher registration token
-resource "rancher_registration_token" "default" {
- name = "staging_token"
- description = "Registration token for the staging environment"
- environment_id = "${rancher_environment.default.id}"
-
- host_labels {
- orchestration = true,
- etcd = true,
- compute = true
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the registration token.
-* `description` - (Optional) A registration token description.
-* `environment_id` - (Required) The ID of the environment to create the token for.
-* `host_labels` - (Optional) A map of host labels to add to the registration command.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `command` - The command used to start a rancher agent for this environment.
-* `registration_url` - The URL to use to register new nodes to the environment.
-* `token` - The token to use to register new nodes to the environment.
-
-## Import
-
-Registration tokens can be imported using the Environment and Registration token
-IDs in the form `environment_id/registration_token_id`.
-
-```
-$ terraform import rancher_registration_token.dev_token 1a5/1c11
-```
-
-If the credentials for the Rancher provider have access to the global API,
-then `environment_id` can be omitted e.g.
-
-```
-$ terraform import rancher_registration_token.dev_token 1c11
-```
diff --git a/website/source/docs/providers/rancher/r/registry.html.markdown b/website/source/docs/providers/rancher/r/registry.html.markdown
deleted file mode 100644
index b778d0b1e..000000000
--- a/website/source/docs/providers/rancher/r/registry.html.markdown
+++ /dev/null
@@ -1,52 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_registry"
-sidebar_current: "docs-rancher-resource-registry"
-description: |-
-  Provides a Rancher Registry resource. This can be used to create registries for rancher environments and retrieve their information.
----
-
-# rancher\_registry
-
-Provides a Rancher Registry resource. This can be used to create registries for rancher environments and retrieve their information.
-
-## Example Usage
-
-```hcl
-# Create a new Rancher registry
-resource "rancher_registry" "dockerhub" {
- name = "dockerhub"
- description = "DockerHub Registry"
- environment_id = "${rancher_environment.default.id}"
- server_address = "index.dockerhub.io"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the registry.
-* `description` - (Optional) A registry description.
-* `environment_id` - (Required) The ID of the environment to create the registry for.
-* `server_address` - (Required) The server address for the registry.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Registries can be imported using the Environment and Registry IDs in the form
-`environment_id/registry_id`
-
-```
-$ terraform import rancher_registry.private_registry 1a5/1sp31
-```
-
-If the credentials for the Rancher provider have access to the global API,
-then `environment_id` can be omitted e.g.
-
-```
-$ terraform import rancher_registry.private_registry 1sp31
-```
diff --git a/website/source/docs/providers/rancher/r/registry_credential.html.markdown b/website/source/docs/providers/rancher/r/registry_credential.html.markdown
deleted file mode 100644
index 0e2430ed6..000000000
--- a/website/source/docs/providers/rancher/r/registry_credential.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_registry_credential"
-sidebar_current: "docs-rancher-resource-registry-credential"
-description: |-
-  Provides a Rancher Registry Credential resource. This can be used to create registry credentials for rancher environments and retrieve their information.
----
-
-# rancher\_registry\_credential
-
-Provides a Rancher Registry Credential resource. This can be used to create registry credentials for rancher environments and retrieve their information.
-
-## Example Usage
-
-```hcl
-# Create a new Rancher registry
-resource "rancher_registry_credential" "dockerhub" {
- name = "dockerhub"
- description = "DockerHub Registry Credential"
- registry_id = "${rancher_registry.dockerhub.id}"
- email = "myself@company.com"
- public_value = "myself"
- secret_value = "mypass"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the registry credential.
-* `description` - (Optional) A registry credential description.
-* `registry_id` - (Required) The ID of the registry to create the credential for.
-* `email` - (Required) The email of the account.
-* `public_value` - (Required) The public value (user name) of the account.
-* `secret_value` - (Required) The secret value (password) of the account.
-
-## Attributes Reference
-
-No further attributes are exported.
-
-## Import
-
-Registry credentials can be imported using the Registry and credential
-IDs in the format `registry_id/credential_id`
-
-```
-$ terraform import rancher_registry_credential.private_registry 1sp31/1c605
-```
-
-If the credentials for the Rancher provider have access to the global API,
-then `registry_id` can be omitted e.g.
-
-```
-$ terraform import rancher_registry_credential.private_registry 1c605
-```
diff --git a/website/source/docs/providers/rancher/r/stack.html.markdown b/website/source/docs/providers/rancher/r/stack.html.markdown
deleted file mode 100644
index 4f18b26bc..000000000
--- a/website/source/docs/providers/rancher/r/stack.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "rancher"
-page_title: "Rancher: rancher_stack"
-sidebar_current: "docs-rancher-resource-stack"
-description: |-
- Provides a Rancher Stack resource. This can be used to create and manage stacks on rancher.
----
-
-# rancher\_stack
-
-Provides a Rancher Stack resource. This can be used to create and manage stacks on rancher.
-
-## Example Usage
-
-```hcl
-# Create a new empty Rancher stack
-resource "rancher_stack" "external-dns" {
- name = "route53"
- description = "Route53 stack"
- environment_id = "${rancher_environment.default.id}"
- catalog_id = "library:route53:7"
- scope = "system"
-
- environment {
- AWS_ACCESS_KEY = "MYKEY"
- AWS_SECRET_KEY = "MYSECRET"
- AWS_REGION = "eu-central-1"
- TTL = "60"
- ROOT_DOMAIN = "example.com"
- ROUTE53_ZONE_ID = ""
- HEALTH_CHECK_INTERVAL = "15"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the stack.
-* `description` - (Optional) A stack description.
-* `environment_id` - (Required) The ID of the environment to create the stack for.
-* `docker_compose` - (Optional) The `docker-compose.yml` content to apply for the stack.
-* `rancher_compose` - (Optional) The `rancher-compose.yml` content to apply for the stack.
-* `environment` - (Optional) The environment to apply to interpret the docker-compose and rancher-compose files.
-* `catalog_id` - (Optional) The catalog ID to link this stack to. When provided, `docker_compose` and `rancher_compose` will be retrieved from the catalog unless they are overridden.
-* `scope` - (Optional) The scope to attach the stack to. Must be one of **user** or **system**. Defaults to **user**.
-* `start_on_create` - (Optional) Whether to start the stack automatically.
-* `finish_upgrade` - (Optional) Whether to automatically finish upgrades to this stack.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `rendered_docker_compose` - The interpolated `docker_compose` applied to the stack.
-* `rendered_rancher_compose` - The interpolated `rancher_compose` applied to the stack.
-
-
-## Import
-
-Stacks can be imported using the Environment and Stack ID in the form
-`<environment_id>/<stack_id>`
-
-```
-$ terraform import rancher_stack.foo 1a5/1e149
-```
-
-If the credentials for the Rancher provider have access to the global API,
-then the `environment_id` can be omitted, e.g.
-
-```
-$ terraform import rancher_stack.foo 1e149
-```
diff --git a/website/source/docs/providers/random/index.html.markdown b/website/source/docs/providers/random/index.html.markdown
deleted file mode 100644
index 234b00106..000000000
--- a/website/source/docs/providers/random/index.html.markdown
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "random"
-page_title: "Provider: Random"
-sidebar_current: "docs-random-index"
-description: |-
- The Random provider is used to generate randomness.
----
-
-# Random Provider
-
-The "random" provider allows the use of randomness within Terraform
-configurations. This is a *logical provider*, which means that it works
-entirely within Terraform's logic, and doesn't interact with any other
-services.
-
-Unconstrained randomness within a Terraform configuration would not be very
-useful, since Terraform's goal is to converge on a fixed configuration by
-applying a diff. Because of this, the "random" provider provides an idea of
-*managed randomness*: it provides resources that generate random values during
-their creation and then hold those values steady until the inputs are changed.
-
-Even with these resources, it is advisable to keep the use of randomness within
-Terraform configuration to a minimum, and retain it for special cases only;
-Terraform works best when the configuration is well-defined, since its behavior
-can then be more readily predicted.
-
-Unless otherwise stated within the documentation of a specific resource, this
-provider's results are **not** sufficiently random for cryptographic use.
-
-For more information on the specific resources available, see the links in the
-navigation bar. Read on for information on the general patterns that apply
-to this provider's resources.
-
-## Resource "Keepers"
-
-As noted above, the random resources generate randomness only when they are
-created; the results produced are stored in the Terraform state and re-used
-until the inputs change, prompting the resource to be recreated.
-
-The resources all provide a map argument called `keepers` that can be populated
-with arbitrary key/value pairs that should be selected such that they remain
-the same until new random values are desired.
-
-For example:
-
-```hcl
-resource "random_id" "server" {
- keepers = {
- # Generate a new id each time we switch to a new AMI id
- ami_id = "${var.ami_id}"
- }
-
- byte_length = 8
-}
-
-resource "aws_instance" "server" {
- tags = {
- Name = "web-server ${random_id.server.hex}"
- }
-
- # Read the AMI id "through" the random_id resource to ensure that
- # both will change together.
- ami = "${random_id.server.keepers.ami_id}"
-
- # ... (other aws_instance arguments) ...
-}
-```
-
-Resource "keepers" are optional. The other arguments to each resource must
-*also* remain constant in order to retain a random result.
-
-To force a random result to be replaced, the `taint` command can be used to
-produce a new result on the next run.
diff --git a/website/source/docs/providers/random/r/id.html.md b/website/source/docs/providers/random/r/id.html.md
deleted file mode 100644
index b06e04013..000000000
--- a/website/source/docs/providers/random/r/id.html.md
+++ /dev/null
@@ -1,73 +0,0 @@
----
-layout: "random"
-page_title: "Random: random_id"
-sidebar_current: "docs-random-resource-id"
-description: |-
- Generates a random identifier.
----
-
-# random\_id
-
-The resource `random_id` generates random numbers that are intended to be
-used as unique identifiers for other resources.
-
-Unlike other resources in the "random" provider, this resource *does* use a
-cryptographic random number generator in order to minimize the chance of
-collisions, making the results of this resource when a 32-byte identifier
-is requested of equivalent uniqueness to a type-4 UUID.
-
-This resource can be used in conjunction with resources that have,
-the `create_before_destroy` lifecycle flag set, to avoid conflicts with
-unique names during the brief period where both the old and new resources
-exist concurrently.
-
-## Example Usage
-
-The following example shows how to generate a unique name for an AWS EC2
-instance that changes each time a new AMI id is selected.
-
-```hcl
-resource "random_id" "server" {
- keepers = {
- # Generate a new id each time we switch to a new AMI id
- ami_id = "${var.ami_id}"
- }
-
- byte_length = 8
-}
-
-resource "aws_instance" "server" {
- tags = {
- Name = "web-server ${random_id.server.hex}"
- }
-
- # Read the AMI id "through" the random_id resource to ensure that
- # both will change together.
- ami = "${random_id.server.keepers.ami_id}"
-
- # ... (other aws_instance arguments) ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `byte_length` - (Required) The number of random bytes to produce. The
- minimum value is 1, which produces eight bits of randomness.
-
-* `keepers` - (Optional) Arbitrary map of values that, when changed, will
- trigger a new id to be generated. See
- [the main provider documentation](../index.html) for more information.
-
-* `prefix` - (Optional) Arbitrary string to prefix the output value with. This
- string is supplied as-is, meaning it is not guaranteed to be URL-safe or
- base64 encoded.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `b64` - The generated id presented in base64, using the URL-friendly character set: case-sensitive letters, digits and the characters `_` and `-`.
-* `hex` - The generated id presented in padded hexadecimal digits. This result will always be twice as long as the requested byte length.
-* `dec` - The generated id presented in non-padded decimal digits.
diff --git a/website/source/docs/providers/random/r/pet.html.md b/website/source/docs/providers/random/r/pet.html.md
deleted file mode 100644
index 9e464aca5..000000000
--- a/website/source/docs/providers/random/r/pet.html.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-layout: "random"
-page_title: "Random: random_pet"
-sidebar_current: "docs-random-resource-pet"
-description: |-
- Generates a random pet.
----
-
-# random\_pet
-
-The resource `random_pet` generates random pet names that are intended to be
-used as unique identifiers for other resources.
-
-This resource can be used in conjunction with resources that have
-the `create_before_destroy` lifecycle flag set, to avoid conflicts with
-unique names during the brief period where both the old and new resources
-exist concurrently.
-
-## Example Usage
-
-The following example shows how to generate a unique pet name for an AWS EC2
-instance that changes each time a new AMI id is selected.
-
-```hcl
-resource "random_pet" "server" {
- keepers = {
- # Generate a new pet name each time we switch to a new AMI id
- ami_id = "${var.ami_id}"
- }
-}
-
-resource "aws_instance" "server" {
- tags = {
- Name = "web-server-${random_pet.server.id}"
- }
-
- # Read the AMI id "through" the random_pet resource to ensure that
- # both will change together.
- ami = "${random_pet.server.keepers.ami_id}"
-
- # ... (other aws_instance arguments) ...
-}
-```
-
-The result of the above will set the Name of the AWS Instance to
-`web-server-simple-snake`.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `keepers` - (Optional) Arbitrary map of values that, when changed, will
- trigger a new id to be generated. See
- [the main provider documentation](../index.html) for more information.
-
-* `length` - (Optional) The length (in words) of the pet name.
-
-* `prefix` - (Optional) A string to prefix the name with.
-
-* `separator` - (Optional) The character to separate words in the pet name.
-
-## Attribute Reference
-
-The following attributes are supported:
-
-* `id` - (string) The random pet name
diff --git a/website/source/docs/providers/random/r/shuffle.html.md b/website/source/docs/providers/random/r/shuffle.html.md
deleted file mode 100644
index 9204df791..000000000
--- a/website/source/docs/providers/random/r/shuffle.html.md
+++ /dev/null
@@ -1,59 +0,0 @@
----
-layout: "random"
-page_title: "Random: random_shuffle"
-sidebar_current: "docs-random-resource-shuffle"
-description: |-
- Produces a random permutation of a given list.
----
-
-# random\_shuffle
-
-The resource `random_shuffle` generates a random permutation of a list
-of strings given as an argument.
-
-## Example Usage
-
-```hcl
-resource "random_shuffle" "az" {
- input = ["us-west-1a", "us-west-1c", "us-west-1d", "us-west-1e"]
- result_count = 2
-}
-
-resource "aws_elb" "example" {
- # Place the ELB in any two of the given availability zones, selected
- # at random.
- availability_zones = ["${random_shuffle.az.result}"]
-
- # ... and other aws_elb arguments ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `input` - (Required) The list of strings to shuffle.
-
-* `result_count` - (Optional) The number of results to return. Defaults to
- the number of items in the `input` list. If fewer items are requested,
- some elements will be excluded from the result. If more items are requested,
- items will be repeated in the result but not more frequently than the number
- of items in the input list.
-
-* `keepers` - (Optional) Arbitrary map of values that, when changed, will
- trigger a new id to be generated. See
- [the main provider documentation](../index.html) for more information.
-
-* `seed` - (Optional) Arbitrary string with which to seed the random number
- generator, in order to produce less-volatile permutations of the list.
- **Important:** Even with an identical seed, it is not guaranteed that the
- same permutation will be produced across different versions of Terraform.
- This argument causes the result to be *less volatile*, but not fixed for
- all time.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `result` - Random permutation of the list of strings given in `input`.
-
diff --git a/website/source/docs/providers/rundeck/index.html.markdown b/website/source/docs/providers/rundeck/index.html.markdown
deleted file mode 100644
index 062045de5..000000000
--- a/website/source/docs/providers/rundeck/index.html.markdown
+++ /dev/null
@@ -1,77 +0,0 @@
----
-layout: "rundeck"
-page_title: "Provider: Rundeck"
-sidebar_current: "docs-rundeck-index"
-description: |-
- The Rundeck provider configures projects, jobs and keys in Rundeck.
----
-
-# Rundeck Provider
-
-The Rundeck provider allows Terraform to create and configure Projects,
-Jobs and Keys in [Rundeck](http://rundeck.org/). Rundeck is a tool
-for runbook automation and execution of arbitrary management tasks,
-allowing operators to avoid logging in to individual machines directly
-via SSH.
-
-The provider configuration block accepts the following arguments:
-
-* ``url`` - (Required) The root URL of a Rundeck server. May alternatively be set via the
- ``RUNDECK_URL`` environment variable.
-
-* ``auth_token`` - (Required) The API auth token to use when making requests. May alternatively
- be set via the ``RUNDECK_AUTH_TOKEN`` environment variable.
-
-* ``allow_unverified_ssl`` - (Optional) Boolean that can be set to ``true`` to disable SSL
- certificate verification. This should be used with care as it could allow an attacker to
- intercept your auth token.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "rundeck" {
- url = "http://rundeck.example.com/"
- auth_token = "abcd1234"
-}
-
-resource "rundeck_project" "anvils" {
- name = "anvils"
- description = "Application for managing Anvils"
-
- ssh_key_storage_path = "${rundeck_private_key.anvils.path}"
-
- resource_model_source {
- type = "file"
-
- config = {
- format = "resourcexml"
-
- # This path is interpreted on the Rundeck server.
- file = "/var/rundeck/projects/anvils/resources.xml"
- }
- }
-}
-
-resource "rundeck_job" "bounceweb" {
- name = "Bounce Web Servers"
- project_name = "${rundeck_project.anvils.name}"
- node_filter_query = "tags: web"
- description = "Restart the service daemons on all the web servers"
-
- command {
- shell_command = "sudo service anvils restart"
- }
-}
-
-resource "rundeck_public_key" "anvils" {
- path = "anvils/id_rsa.pub"
- key_material = "ssh-rsa yada-yada-yada"
-}
-
-resource "rundeck_private_key" "anvils" {
- path = "anvils/id_rsa"
- key_material = "${file(\"id_rsa.pub\")}"
-}
-```
diff --git a/website/source/docs/providers/rundeck/r/job.html.md b/website/source/docs/providers/rundeck/r/job.html.md
deleted file mode 100644
index 29b8b7087..000000000
--- a/website/source/docs/providers/rundeck/r/job.html.md
+++ /dev/null
@@ -1,168 +0,0 @@
----
-layout: "rundeck"
-page_title: "Rundeck: rundeck_job"
-sidebar_current: "docs-rundeck-resource-job"
-description: |-
- The rundeck_job resource allows Rundeck jobs to be managed by Terraform.
----
-
-# rundeck\_job
-
-The job resource allows Rundeck jobs to be managed by Terraform. In Rundeck a job is a particular
-named set of steps that can be executed against one or more of the nodes configured for its
-associated project.
-
-Each job belongs to a project. A project can be created with the `rundeck_project` resource.
-
-## Example Usage
-
-```hcl
-resource "rundeck_job" "bounceweb" {
- name = "Bounce Web Servers"
- project_name = "anvils"
- node_filter_query = "tags: web"
- description = "Restart the service daemons on all the web servers"
-
- command {
- shell_command = "sudo service anvils restart"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the job, used to describe the job in the Rundeck UI.
-
-* `description` - (Required) A longer description of the job, describing the job in the Rundeck UI.
-
-* `project_name` - (Required) The name of the project that this job should belong to.
-
-* `group_name` - (Optional) The name of a group within the project in which to place the job.
- Setting this creates collapsible subcategories within the Rundeck UI's project job index.
-
-* `log_level` - (Optional) The log level that Rundeck should use for this job. Defaults to "INFO".
-
-* `allow_concurrent_executions` - (Optional) Boolean defining whether two or more executions of
- this job can run concurrently. The default is `false`, meaning that jobs will only run
- sequentially.
-
-* `max_thread_count` - (Optional) The maximum number of threads to use to execute this job, which
- controls on how many nodes the commands can be run simultaneously. Defaults to 1, meaning that
- the nodes will be visited sequentially.
-
-* `continue_on_error` - (Optional) Boolean defining whether Rundeck will continue to run
- subsequent steps if any intermediate step fails. Defaults to `false`, meaning that execution
- will stop and the execution will be considered to have failed.
-
-* `rank_attribute` - (Optional) The name of the attribute that will be used to decide in which
- order the nodes will be visited while executing the job across multiple nodes.
-
-* `rank_order` - (Optional) Keyword deciding which direction the nodes are sorted in terms of
- the chosen `rank_attribute`. May be either "ascending" (the default) or "descending".
-
-* `preserve_options_order`: (Optional) Boolean controlling whether the configured options will
- be presented in their configuration order when shown in the Rundeck UI. The default is `false`,
- which means that the options will be displayed in alphabetical order by name.
-
-* `command_ordering_strategy`: (Optional) The name of the strategy used to describe how to
- traverse the matrix of nodes and commands. The default is "node-first", meaning that all commands
- will be executed on a single node before moving on to the next. May also be set to "step-first",
- meaning that a single step will be executed across all nodes before moving on to the next step.
-
-* `node_filter_query` - (Optional) A query string using
- [Rundeck's node filter language](http://rundeck.org/docs/manual/node-filters.html#node-filter-syntax)
- that defines which subset of the project's nodes will be used to execute this job.
-
-* `node_filter_exclude_precedence`: (Optional) Boolean controlling a deprecated Rundeck feature that controls
- whether node exclusions take priority over inclusions.
-
-* `option`: (Optional) Nested block defining an option a user may set when executing this job. A
- job may have any number of options. The structure of this nested block is described below.
-
-* `command`: (Required) Nested block defining one step in the job workflow. A job must have one or
- more commands. The structure of this nested block is described below.
-
-`option` blocks have the following structure:
-
-* `name`: (Required) Unique name that will be shown in the UI when entering values and used as
- a variable name for template substitutions.
-
-* `default_value`: (Optional) A default value for the option.
-
-* `value_choices`: (Optional) A list of strings giving a set of predefined values that the user
- may choose from when entering a value for the option.
-
-* `value_choices_url`: (Optional) Can be used instead of `value_choices` to cause Rundeck to
- obtain a list of choices dynamically by fetching this URL.
-
-* `require_predefined_choice`: (Optional) Boolean controlling whether the user is allowed to
- enter values not included in the predefined set of choices (`false`, the default) or whether
- a predefined choice is required (`true`).
-
-* `validation_regex`: (Optional) A regular expression that a provided value must match in order
- to be accepted.
-
-* `description`: (Optional) A longer description of the option to be shown in the UI.
-
-* `required`: (Optional) Boolean defining whether the user must provide a value for the option.
- Defaults to `false`.
-
-* `allow_multiple_values`: (Optional) Boolean defining whether the user may select multiple values
- from the set of predefined values. Defaults to `false`, meaning that the user may choose only
- one value.
-
-* `multi_value_delimiter`: (Optional) Delimiter used to join together multiple values into a single
- string when `allow_multiple_values` is set and the user chooses multiple values.
-
-* `obscure_input`: (Optional) Boolean controlling whether the value of this option should be obscured
- during entry and in execution logs. Defaults to `false`, but should be set to `true` when the
- requested value is a password, private key or any other secret value.
-
-* `exposed_to_scripts`: (Optional) Boolean controlling whether the value of this option is available
- to scripts executed by job commands. Defaults to `false`.
-
-`command` blocks must have any one of the following combinations of arguments as contents:
-
-* `description`: (Optional) gives a description to the command block.
-
-* `shell_command` gives a single shell command to execute on the nodes.
-
-* `inline_script` gives a whole shell script, inline in the configuration, to execute on the nodes.
-
-* `script_file` and `script_file_args` together describe a script that is already pre-installed
- on the nodes which is to be executed.
-
-* A `job` block, described below, causes another job within the same project to be executed as
- a command.
-
-* A `step_plugin` block, described below, causes a step plugin to be executed as a command.
-
-* A `node_step_plugin` block, described below, causes a node step plugin to be executed once for
- each node.
-
-A command's `job` block has the following structure:
-
-* `name`: (Required) The name of the job to execute. The target job must be in the same project
- as the current job.
-
-* `group_name`: (Optional) The name of the group that the target job belongs to, if any.
-
-* `run_for_each_node`: (Optional) Boolean controlling whether the job is run only once (`false`,
- the default) or whether it is run once for each node (`true`).
-
-* `args`: (Optional) A string giving the arguments to pass to the target job, using
- [Rundeck's job arguments syntax](http://rundeck.org/docs/manual/jobs.html#job-reference-step).
-
-A command's `step_plugin` or `node_step_plugin` block both have the following structure:
-
-* `type`: (Required) The name of the plugin to execute.
-
-* `config`: (Optional) Map of arbitrary configuration parameters for the selected plugin.
-
-## Attributes Reference
-
-The following attribute is exported:
-
-* `id` - A unique identifier for the job.
diff --git a/website/source/docs/providers/rundeck/r/private_key.html.md b/website/source/docs/providers/rundeck/r/private_key.html.md
deleted file mode 100644
index a95829ef8..000000000
--- a/website/source/docs/providers/rundeck/r/private_key.html.md
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "rundeck"
-page_title: "Rundeck: rundeck_private_key"
-sidebar_current: "docs-rundeck-resource-private-key"
-description: |-
- The rundeck_private_key resource allows private keys to be stored in Rundeck's key store.
----
-
-# rundeck\_private\_key
-
-The private key resource allows SSH private keys to be stored into Rundeck's key store.
-The key store is where Rundeck keeps credentials that are needed to access the nodes on which
-it runs commands.
-
-## Example Usage
-
-```hcl
-resource "rundeck_private_key" "anvils" {
- path = "anvils/id_rsa"
- key_material = "${file("/id_rsa")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `path` - (Required) The path within the key store where the key will be stored.
-
-* `key_material` - (Required) The private key material to store, serialized in any way that is
- accepted by OpenSSH.
-
-The key material is hashed before it is stored in the state file, so sharing the resulting state
-will not disclose the private key contents.
-
-## Attributes Reference
-
-Rundeck does not allow stored private keys to be retrieved via the API, so this resource does not
-export any attributes.
diff --git a/website/source/docs/providers/rundeck/r/project.html.md b/website/source/docs/providers/rundeck/r/project.html.md
deleted file mode 100644
index 31a3827a3..000000000
--- a/website/source/docs/providers/rundeck/r/project.html.md
+++ /dev/null
@@ -1,90 +0,0 @@
----
-layout: "rundeck"
-page_title: "Rundeck: rundeck_project"
-sidebar_current: "docs-rundeck-resource-project"
-description: |-
- The rundeck_project resource allows Rundeck projects to be managed by Terraform.
----
-
-# rundeck\_project
-
-The project resource allows Rundeck projects to be managed by Terraform. In Rundeck a project
-is the container object for a set of jobs and the configuration for which servers those jobs
-can be run on.
-
-## Example Usage
-
-```hcl
-resource "rundeck_project" "anvils" {
- name = "anvils"
- description = "Application for managing Anvils"
-
- ssh_key_storage_path = "anvils/id_rsa"
-
- resource_model_source {
- type = "file"
- config = {
- format = "resourcexml"
- # This path is interpreted on the Rundeck server.
- file = "/var/rundeck/projects/anvils/resources.xml"
- }
- }
-}
-```
-
-Note that the above configuration assumes the existence of a ``resources.xml`` file in the
-filesystem on the Rundeck server. The Rundeck provider does not itself support creating such a file,
-but one way to place it would be to use the ``file`` provisioner to copy a configuration file
-from the module directory.
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The name of the project, used both in the UI and to uniquely identify
- the project. Must therefore be unique across a single Rundeck installation.
-
-* `resource_model_source` - (Required) Nested block instructing Rundeck on how to determine the
- set of resources (nodes) for this project. The nested block structure is described below.
-
-* `description` - (Optional) A description of the project, to be displayed in the Rundeck UI.
- Defaults to "Managed by Terraform".
-
-* `default_node_file_copier_plugin` - (Optional) The name of a plugin to use to copy files onto
- nodes within this project. Defaults to `jsch-scp`, which uses the "Secure Copy" protocol
- to send files over SSH.
-
-* `default_node_executor_plugin` - (Optional) The name of a plugin to use to run commands on
- nodes within this project. Defaults to `jsch-ssh`, which uses the SSH protocol to access the
- nodes.
-
-* `ssh_authentication_type` - (Optional) When the SSH-based file copier and executor plugins are
- used, the type of SSH authentication to use. Defaults to `privateKey`.
-
-* `ssh_key_storage_path` - (Optional) When the SSH-based file copier and executor plugins are
- used, the location within Rundeck's key store where the SSH private key can be found. Private
- keys can be uploaded to rundeck using the `rundeck_private_key` resource.
-
-* `ssh_key_file_path` - (Optional) Like `ssh_key_storage_path` except that the key is read from
- the Rundeck server's local filesystem, rather than from the key store.
-
-* `extra_config` - (Optional) Behind the scenes a Rundeck project is really an arbitrary set of
- key/value pairs. This map argument allows setting any configuration properties that aren't
- explicitly supported by the other arguments described above, but due to limitations of Terraform
- the key names must be written with slashes in place of dots. Do not use this argument to set
- properties that the above arguments set, or undefined behavior will result.
-
-`resource_model_source` blocks have the following nested arguments:
-
-* `type` - (Required) The name of the resource model plugin to use.
-
-* `config` - (Required) Map of arbitrary configuration properties for the selected resource model
- plugin.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `name` - The unique name that identifies the project, as set in the arguments.
-* `ui_url` - The URL of the index page for this project in the Rundeck UI.
-
diff --git a/website/source/docs/providers/rundeck/r/public_key.html.md b/website/source/docs/providers/rundeck/r/public_key.html.md
deleted file mode 100644
index a3e617829..000000000
--- a/website/source/docs/providers/rundeck/r/public_key.html.md
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "rundeck"
-page_title: "Rundeck: rundeck_public_key"
-sidebar_current: "docs-rundeck-resource-public-key"
-description: |-
- The rundeck_public_key resource allows public keys to be stored in Rundeck's key store.
----
-
-# rundeck\_public\_key
-
-The public key resource allows SSH public keys to be stored into Rundeck's key store.
-The key store is where Rundeck keeps credentials that are needed to access the nodes on which
-it runs commands.
-
-This resource also allows the retrieval of an existing public key from the store, so that it
-may be used in the configuration of other resources such as ``aws_key_pair``.
-
-## Example Usage
-
-```hcl
-resource "rundeck_public_key" "anvils" {
- path = "anvils/id_rsa.pub"
- key_material = "ssh-rsa yada-yada-yada"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `path` - (Required) The path within the key store where the key will be stored. By convention
- this path name normally ends with ".pub" and otherwise has the same name as the associated
- private key.
-
-* `key_material` - (Optional) The public key string to store, serialized in any way that is accepted
- by OpenSSH. If this is not included, ``key_material`` becomes an attribute that can be used
- to read the already-existing key material in the Rundeck store.
-
-The key material is included inline as a string, which is consistent with the way a public key
-is provided to the `aws_key_pair`, `cloudstack_ssh_keypair`, `digitalocean_ssh_key` and
-`openstack_compute_keypair_v2` resources. This means the `key_material` argument can be populated
-from the interpolation of the `public_key` attribute of such a keypair resource, or vice-versa.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `url` - The URL at which the key material can be retrieved from the key store by other clients.
-
-* `key_material` - If `key_material` is omitted in the configuration, it becomes an attribute that
- exposes the key material already stored at the given `path`.
diff --git a/website/source/docs/providers/scaleway/d/bootscript.html.markdown b/website/source/docs/providers/scaleway/d/bootscript.html.markdown
deleted file mode 100644
index 69c75ac50..000000000
--- a/website/source/docs/providers/scaleway/d/bootscript.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: scaleway_bootscript"
-sidebar_current: "docs-scaleway-datasource-bootscript"
-description: |-
- Get information on a Scaleway bootscript.
----
-
-# scaleway\_bootscript
-
-Use this data source to get the ID of a registered Bootscript for use with the
-`scaleway_server` resource.
-
-## Example Usage
-
-```hcl
-data "scaleway_bootscript" "debug" {
- architecture = "arm"
- name_filter = "Rescue"
-}
-```
-
-## Argument Reference
-
-* `architecture` - (Optional) any supported Scaleway architecture, e.g. `x86_64`, `arm`
-
-* `name_filter` - (Optional) Regexp to match Bootscript name by
-
-* `name` - (Optional) Exact name of desired Bootscript
-
-## Attributes Reference
-
-`id` is set to the ID of the found Bootscript. In addition, the following attributes
-are exported:
-
-* `architecture` - architecture of the Bootscript, e.g. `arm` or `x86_64`
-
-* `organization` - uuid of the organization owning this Bootscript
-
-* `public` - is this a public bootscript
-
-* `boot_cmd_args` - command line arguments used for booting
-
-* `dtb` - path to Device Tree Blob detailing hardware information
-
-* `initrd` - URL to initial ramdisk content
-
-* `kernel` - URL to used kernel
-
diff --git a/website/source/docs/providers/scaleway/d/image.html.markdown b/website/source/docs/providers/scaleway/d/image.html.markdown
deleted file mode 100644
index e485d2867..000000000
--- a/website/source/docs/providers/scaleway/d/image.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: scaleway_image"
-sidebar_current: "docs-scaleway-datasource-image"
-description: |-
- Get information on a Scaleway image.
----
-
-# scaleway\_image
-
-Use this data source to get the ID of a registered Image for use with the
-`scaleway_server` resource.
-
-## Example Usage
-
-```hcl
-data "scaleway_image" "ubuntu" {
- architecture = "arm"
- name = "Ubuntu Precise"
-}
-
-resource "scaleway_server" "base" {
- name = "test"
- image = "${data.scaleway_image.ubuntu.id}"
- type = "C1"
-}
-```
-
-## Argument Reference
-
-* `architecture` - (Required) any supported Scaleway architecture, e.g. `x86_64`, `arm`
-
-* `name_filter` - (Optional) Regexp to match Image name by
-
-* `name` - (Optional) Exact name of desired Image
-
-## Attributes Reference
-
-`id` is set to the ID of the found Image. In addition, the following attributes
-are exported:
-
-* `architecture` - architecture of the Image, e.g. `arm` or `x86_64`
-
-* `organization` - uuid of the organization owning this Image
-
-* `public` - is this a public bootscript
-
-* `creation_date` - date when image was created
-
diff --git a/website/source/docs/providers/scaleway/index.html.markdown b/website/source/docs/providers/scaleway/index.html.markdown
deleted file mode 100644
index 88ae76089..000000000
--- a/website/source/docs/providers/scaleway/index.html.markdown
+++ /dev/null
@@ -1,101 +0,0 @@
----
-layout: "scaleway"
-page_title: "Provider: Scaleway"
-sidebar_current: "docs-scaleway-index"
-description: |-
- The Scaleway provider is used to interact with Scaleway bare metal & VPS provider.
----
-
-# Scaleway Provider
-
-The Scaleway provider is used to manage Scaleway resources.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-Here is an example that will setup the following:
-+ An ARM Server.
-+ An IP Address.
-+ A security group.
-
-(create this as sl.tf and run terraform commands from this directory):
-
-```hcl
-provider "scaleway" {
- organization = ""
- token = ""
- region = "par1"
-}
-
-resource "scaleway_ip" "ip" {
- server = "${scaleway_server.test.id}"
-}
-
-resource "scaleway_server" "test" {
- name = "test"
- image = "aecaed73-51a5-4439-a127-6d8229847145"
- type = "C2S"
-}
-
-resource "scaleway_volume" "test" {
- name = "test"
- size_in_gb = 20
- type = "l_ssd"
-}
-
-resource "scaleway_volume_attachment" "test" {
- server = "${scaleway_server.test.id}"
- volume = "${scaleway_volume.test.id}"
-}
-
-resource "scaleway_security_group" "http" {
- name = "http"
- description = "allow HTTP and HTTPS traffic"
-}
-
-resource "scaleway_security_group_rule" "http_accept" {
- security_group = "${scaleway_security_group.http.id}"
-
- action = "accept"
- direction = "inbound"
- ip_range = "0.0.0.0/0"
- protocol = "TCP"
- port = 80
-}
-
-resource "scaleway_security_group_rule" "https_accept" {
- security_group = "${scaleway_security_group.http.id}"
-
- action = "accept"
- direction = "inbound"
- ip_range = "0.0.0.0/0"
- protocol = "TCP"
- port = 443
-}
-```
-
-You'll need to provide your Scaleway organization **access key** and **token**.
-
-Your access key can be found on your Scaleway control panel, in the *Credentials*
-tab of the management panes. It is under the *Tokens* subsection, but is labelled
-separately as **access key**.
-
-Your **token** can be generated by selecting to "Create new token" under the same
-subsection as above. This does not require further input, but giving each token a
-friendly-name is suggested.
-
-If you do not want to put credentials in your configuration file,
-you can leave them out:
-
-```
-provider "scaleway" {
- region = "par1"
-}
-```
-
-...and instead set these environment variables:
-
-- **SCALEWAY_ORGANIZATION**: Your Scaleway `organization` access key
-- **SCALEWAY_TOKEN**: Your API access `token`, generated by you
-- **SCALEWAY_REGION**: The Scaleway region
diff --git a/website/source/docs/providers/scaleway/r/ip.html.markdown b/website/source/docs/providers/scaleway/r/ip.html.markdown
deleted file mode 100644
index 33630e34a..000000000
--- a/website/source/docs/providers/scaleway/r/ip.html.markdown
+++ /dev/null
@@ -1,41 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: ip"
-sidebar_current: "docs-scaleway-resource-ip"
-description: |-
- Manages Scaleway IPs.
----
-
-# scaleway\_ip
-
-Provides IPs for servers. This allows IPs to be created, updated and deleted.
-For additional details please refer to [API documentation](https://developer.scaleway.com/#ips).
-
-## Example Usage
-
-```hcl
-resource "scaleway_ip" "test_ip" {}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `server` - (Optional) ID of server to associate IP with
-
-Field `server` is editable.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
-* `ip` - IP of the new resource
-
-## Import
-
-Instances can be imported using the `id`, e.g.
-
-```
-$ terraform import scaleway_ip.jump_host 5faef9cd-ea9b-4a63-9171-9e26bec03dbc
-```
diff --git a/website/source/docs/providers/scaleway/r/security_group.html.markdown b/website/source/docs/providers/scaleway/r/security_group.html.markdown
deleted file mode 100644
index 91293e908..000000000
--- a/website/source/docs/providers/scaleway/r/security_group.html.markdown
+++ /dev/null
@@ -1,44 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: security_group"
-sidebar_current: "docs-scaleway-resource-security_group"
-description: |-
- Manages Scaleway security groups.
----
-
-# scaleway\_security\_group
-
-Provides security groups. This allows security groups to be created, updated and deleted.
-For additional details please refer to [API documentation](https://developer.scaleway.com/#security-groups).
-
-## Example Usage
-
-```hcl
-resource "scaleway_security_group" "test" {
- name = "test"
- description = "test"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) name of security group
-* `description` - (Required) description of security group
-
-Field `name`, `description` are editable.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
-
-## Import
-
-Instances can be imported using the `id`, e.g.
-
-```
-$ terraform import scaleway_security_group.test 5faef9cd-ea9b-4a63-9171-9e26bec03dbc
-```
diff --git a/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown b/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown
deleted file mode 100644
index ecf84e589..000000000
--- a/website/source/docs/providers/scaleway/r/security_group_rule.html.markdown
+++ /dev/null
@@ -1,49 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: security_group_rule"
-sidebar_current: "docs-scaleway-resource-security_group_rule"
-description: |-
- Manages Scaleway security group rules.
----
-
-# scaleway\_security\_group\_rule
-
-Provides security group rules. This allows security group rules to be created, updated and deleted.
-For additional details please refer to [API documentation](https://developer.scaleway.com/#security-groups-manage-rules).
-
-## Example Usage
-
-```hcl
-resource "scaleway_security_group" "test" {
- name = "test"
- description = "test"
-}
-
-resource "scaleway_security_group_rule" "smtp_drop_1" {
- security_group = "${scaleway_security_group.test.id}"
-
- action = "accept"
- direction = "inbound"
- ip_range = "0.0.0.0/0"
- protocol = "TCP"
- port = 25
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `action` - (Required) action of rule (`accept`, `drop`)
-* `direction` - (Required) direction of rule (`inbound`, `outbound`)
-* `ip_range` - (Required) ip_range of rule
-* `protocol` - (Required) protocol of rule (`ICMP`, `TCP`, `UDP`)
-* `port` - (Optional) port of the rule
-
-Fields `action`, `direction`, `ip_range`, `protocol`, `port` are editable.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
diff --git a/website/source/docs/providers/scaleway/r/server.html.markdown b/website/source/docs/providers/scaleway/r/server.html.markdown
deleted file mode 100644
index 1fb21de38..000000000
--- a/website/source/docs/providers/scaleway/r/server.html.markdown
+++ /dev/null
@@ -1,71 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: server"
-sidebar_current: "docs-scaleway-resource-server"
-description: |-
- Manages Scaleway servers.
----
-
-# scaleway\_server
-
-Provides servers. This allows servers to be created, updated and deleted.
-For additional details please refer to [API documentation](https://developer.scaleway.com/#servers).
-
-## Example Usage
-
-```hcl
-resource "scaleway_server" "test" {
- name = "test"
- image = "5faef9cd-ea9b-4a63-9171-9e26bec03dbc"
- type = "VC1M"
-
- volume {
- size_in_gb = 20
- type = "l_ssd"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) name of server
-* `image` - (Required) base image of server
-* `type` - (Required) type of server
-* `bootscript` - (Optional) server bootscript
-* `tags` - (Optional) list of tags for server
-* `enable_ipv6` - (Optional) enable ipv6
-* `dynamic_ip_required` - (Optional) make server publicly available
-* `security_group` - (Optional) assign security group to server
-
-Field `name`, `type`, `tags`, `dynamic_ip_required`, `security_group` are editable.
-
-## Volume
-
-You can attach additional volumes to your instance, which will share the lifetime
-of your `scaleway_server` resource.
-
-**Warning:** Using the `volume` attribute does not modify the System Volume provided default with every `scaleway_server` instance.
-Instead it adds additional volumes to the server instance.
-
-The `volume` mapping supports the following:
-
-* `type` - (Required) The type of volume. Can be `"l_ssd"`
-* `size_in_gb` - (Required) The size of the volume in gigabytes.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
-* `private_ip` - private ip of the new resource
-* `public_ip` - public ip of the new resource
-
-## Import
-
-Instances can be imported using the `id`, e.g.
-
-```
-$ terraform import scaleway_server.web 5faef9cd-ea9b-4a63-9171-9e26bec03dbc
-```
diff --git a/website/source/docs/providers/scaleway/r/volume.html.markdown b/website/source/docs/providers/scaleway/r/volume.html.markdown
deleted file mode 100644
index 4aa2be147..000000000
--- a/website/source/docs/providers/scaleway/r/volume.html.markdown
+++ /dev/null
@@ -1,51 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: volume"
-sidebar_current: "docs-scaleway-resource-volume"
-description: |-
- Manages Scaleway Volumes.
----
-
-# scaleway\_volume
-
-Provides volumes. This allows volumes to be created, updated and deleted.
-For additional details please refer to [API documentation](https://developer.scaleway.com/#volumes).
-
-## Example Usage
-
-```hcl
-resource "scaleway_server" "test" {
- name = "test"
- image = "aecaed73-51a5-4439-a127-6d8229847145"
- type = "C2S"
- volumes = ["${scaleway_volume.test.id}"]
-}
-
-resource "scaleway_volume" "test" {
- name = "test"
- size_in_gb = 20
- type = "l_ssd"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) name of volume
-* `size_in_gb` - (Required) size of the volume in GB
-* `type` - (Required) type of volume
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
-
-## Import
-
-Instances can be imported using the `id`, e.g.
-
-```
-$ terraform import scaleway_volume.test 5faef9cd-ea9b-4a63-9171-9e26bec03dbc
-```
diff --git a/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown b/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown
deleted file mode 100644
index 05e89fc81..000000000
--- a/website/source/docs/providers/scaleway/r/volume_attachment.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "scaleway"
-page_title: "Scaleway: volume attachment"
-sidebar_current: "docs-scaleway-resource-volume attachment"
-description: |-
- Manages Scaleway Volume attachments for servers.
----
-
-# scaleway\_volume\_attachment
-
-This allows volumes to be attached to servers.
-
-**Warning:** Attaching volumes requires the servers to be powered off. This will lead
-to downtime if the server is already in use.
-
-## Example Usage
-
-```hcl
-resource "scaleway_server" "test" {
- name = "test"
- image = "aecaed73-51a5-4439-a127-6d8229847145"
- type = "C2S"
-}
-
-resource "scaleway_volume" "test" {
- name = "test"
- size_in_gb = 20
- type = "l_ssd"
-}
-
-resource "scaleway_volume_attachment" "test" {
- server = "${scaleway_server.test.id}"
- volume = "${scaleway_volume.test.id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `server` - (Required) id of the server
-* `volume` - (Required) id of the volume to be attached
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - id of the new resource
diff --git a/website/source/docs/providers/softlayer/index.html.markdown b/website/source/docs/providers/softlayer/index.html.markdown
deleted file mode 100644
index b7249b1ea..000000000
--- a/website/source/docs/providers/softlayer/index.html.markdown
+++ /dev/null
@@ -1,84 +0,0 @@
----
-layout: "softlayer"
-page_title: "Provider: SoftLayer"
-sidebar_current: "docs-softlayer-index"
-description: |-
- The SoftLayer provider is used to interact with SoftLayer resources.
----
-
-# SoftLayer Provider
-
-The SoftLayer provider is used to manage SoftLayer resources.
-
-Use the navigation to the left to read about the available resources.
-
--> **Note:** The SoftLayer provider is new as of Terraform 0.6.16.
-It is ready to be used but many features are still being added. If there
-is a SoftLayer feature missing, please report it in the GitHub repo.
-
-## Example Usage
-
-Here is an example that will setup the following:
-
-+ An SSH key resource.
-+ A virtual server resource that uses an existing SSH key.
-+ A virtual server resource using an existing SSH key and a Terraform managed SSH key (created as `test_key_1` in the example below).
-
-Add the below to a file called `sl.tf` and run the `terraform` command from the same directory:
-
-```hcl
-provider "softlayer" {
- username = ""
- api_key = ""
-}
-
-# This will create a new SSH key that will show up under the \
-# Devices>Manage>SSH Keys in the SoftLayer console.
-resource "softlayer_ssh_key" "test_key_1" {
- name = "test_key_1"
- public_key = "${file(\"~/.ssh/id_rsa_test_key_1.pub\")}"
-
- # Windows Example:
- # public_key = "${file(\"C:\ssh\keys\path\id_rsa_test_key_1.pub\")}"
-}
-
-# Virtual Server created with existing SSH Key already in SoftLayer \
-# inventory and not created using this Terraform template.
-resource "softlayer_virtual_guest" "my_server_1" {
- name = "my_server_1"
- domain = "example.com"
- ssh_keys = ["123456"]
- image = "DEBIAN_7_64"
- region = "ams01"
- public_network_speed = 10
- cpu = 1
- ram = 1024
-}
-
-# Virtual Server created with a mix of previously existing and \
-# Terraform created/managed resources.
-resource "softlayer_virtual_guest" "my_server_2" {
- name = "my_server_2"
- domain = "example.com"
- ssh_keys = ["123456", "${softlayer_ssh_key.test_key_1.id}"]
- image = "CENTOS_6_64"
- region = "ams01"
- public_network_speed = 10
- cpu = 1
- ram = 1024
-}
-```
-
-You'll need to provide your SoftLayer username and API key,
-so that Terraform can connect. If you don't want to put
-credentials in your configuration file, you can leave them
-out:
-
-```
-provider "softlayer" {}
-```
-
-...and instead set these environment variables:
-
-- **SOFTLAYER_USERNAME**: Your SoftLayer username
-- **SOFTLAYER_API_KEY**: Your API key
diff --git a/website/source/docs/providers/softlayer/r/ssh_key.html.markdown b/website/source/docs/providers/softlayer/r/ssh_key.html.markdown
deleted file mode 100644
index 571ad6c6c..000000000
--- a/website/source/docs/providers/softlayer/r/ssh_key.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "softlayer"
-page_title: "SoftLayer: ssh_key"
-sidebar_current: "docs-softlayer-resource-ssh-key"
-description: |-
- Manages SoftLayer SSH Keys.
----
-
-# softlayer\_ssh\_key
-
-Provides SSH keys. This allows SSH keys to be created, updated and deleted.
-For additional details please refer to [API documentation](http://sldn.softlayer.com/reference/datatypes/SoftLayer_Security_Ssh_Key).
-
-## Example Usage
-
-```hcl
-resource "softlayer_ssh_key" "test_ssh_key" {
- name = "test_ssh_key_name"
- notes = "test_ssh_key_notes"
- public_key = "ssh-rsa "
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A descriptive name used to identify an SSH key.
-* `public_key` - (Required) The public SSH key.
-* `notes` - (Optional) A small note about an SSH key to use at your discretion.
-
-The `name` and `notes` fields are editable.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the new SSH key
-* `fingerprint` - sequence of bytes to authenticate or lookup a longer SSH key.
diff --git a/website/source/docs/providers/softlayer/r/virtual_guest.html.markdown b/website/source/docs/providers/softlayer/r/virtual_guest.html.markdown
deleted file mode 100644
index cf487085e..000000000
--- a/website/source/docs/providers/softlayer/r/virtual_guest.html.markdown
+++ /dev/null
@@ -1,135 +0,0 @@
----
-layout: "softlayer"
-page_title: "SoftLayer: virtual_guest"
-sidebar_current: "docs-softlayer-resource-virtual-guest"
-description: |-
- Manages SoftLayer Virtual Guests.
----
-
-# softlayer\_virtual\_guest
-
-Provides virtual guest resource. This allows virtual guests to be created, updated
-and deleted. For additional details please refer to [API documentation](http://sldn.softlayer.com/reference/services/SoftLayer_Virtual_Guest).
-
-## Example Usage
-
-Create a new virtual guest using the "Debian" image.
-
-```hcl
-resource "softlayer_virtual_guest" "twc_terraform_sample" {
- name = "twc-terraform-sample-name"
- domain = "bar.example.com"
- image = "DEBIAN_7_64"
- region = "ams01"
- public_network_speed = 10
- hourly_billing = true
- private_network_only = false
- cpu = 1
- ram = 1024
- disks = [25, 10, 20]
- user_data = "{\"value\":\"newvalue\"}"
- dedicated_acct_host_only = true
- local_disk = false
- frontend_vlan_id = 1085155
- backend_vlan_id = 1085157
-}
-```
-
-Create a new virtual guest using block device template.
-
-```hcl
-resource "softlayer_virtual_guest" "terraform-sample-BDTGroup" {
- name = "terraform-sample-blockDeviceTemplateGroup"
- domain = "bar.example.com"
- region = "ams01"
- public_network_speed = 10
- hourly_billing = false
- cpu = 1
- ram = 1024
- local_disk = false
- block_device_template_group_gid = "****-****-****-****-****"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` | *string*
- * Hostname for the computing instance.
- * **Required**
-* `domain` | *string*
- * Domain for the computing instance.
- * **Required**
-* `cpu` | *int*
- * The number of CPU cores to allocate.
- * **Required**
-* `ram` | *int*
- * The amount of memory to allocate in megabytes.
- * **Required**
-* `region` | *string*
- * Specifies which datacenter the instance is to be provisioned in.
- * **Required**
-* `hourly_billing` | *boolean*
- * Specifies the billing type for the instance. When `true`, the computing instance will be billed on hourly usage, otherwise it will be billed on a monthly basis.
- * **Required**
-* `local_disk` | *boolean*
- * Specifies the disk type for the instance. When `true`, the disks for the computing instance will be provisioned on the host which it runs, otherwise SAN disks will be provisioned.
- * **Required**
-* `dedicated_acct_host_only` | *boolean*
- * Specifies whether or not the instance must only run on hosts with instances from the same account
- * *Default*: nil
- * *Optional*
-* `image` | *string*
- * An identifier for the operating system to provision the computing instance with.
- * **Conditionally required** - Disallowed when `blockDeviceTemplateGroup.globalIdentifier` is provided, as the template will specify the operating system.
-* `block_device_template_group_gid` | *string*
- * A global identifier for the template to be used to provision the computing instance.
- * **Conditionally required** - Disallowed when `operatingSystemReferenceCode` is provided, as the template will specify the operating system.
-* `public_network_speed` | *int*
- * Specifies the connection speed for the instance's network components.
- * *Default*: 10
- * *Optional*
-* `private_network_only` | *boolean*
- * Specifies whether or not the instance only has access to the private network. When true this flag specifies that a compute instance is to only have access to the private network.
- * *Default*: False
- * *Optional*
-* `frontend_vlan_id` | *int*
- * Specifies the network VLAN which is to be used for the front end interface of the computing instance.
- * *Default*: nil
- * *Optional*
-* `backend_vlan_id` | *int*
- * Specifies the network VLAN which is to be used for the back end interface of the computing instance.
- * *Default*: nil
- * *Optional*
-* `disks` | *array*
- * Block device and disk image settings for the computing instance
- * *Optional*
- * *Default*: The smallest available capacity for the primary disk will be used. If an image template is specified the disk capacity will be provided by the template.
-* `user_data` | *string*
- * Arbitrary data to be made available to the computing instance.
- * *Default*: nil
- * *Optional*
-* `ssh_keys` | *array*
- * SSH keys to install on the computing instance upon provisioning.
- * *Default*: nil
- * *Optional*
-* `ipv4_address` | *string*
- * Uses `editObject` call, template data [defined here](https://sldn.softlayer.com/reference/datatypes/SoftLayer_Virtual_Guest).
- * *Default*: nil
- * *Optional*
-* `ipv4_address_private` | *string*
- * Uses `editObject` call, template data [defined here](https://sldn.softlayer.com/reference/datatypes/SoftLayer_Virtual_Guest).
- * *Default*: nil
- * *Optional*
-* `post_install_script_uri` | *string*
- * As defined in the [SoftLayer_Virtual_Guest_SupplementalCreateObjectOptions](https://sldn.softlayer.com/reference/datatypes/SoftLayer_Virtual_Guest_SupplementalCreateObjectOptions).
- * *Default*: nil
- * *Optional*
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the virtual guest.
-
diff --git a/website/source/docs/providers/spotinst/index.html.markdown b/website/source/docs/providers/spotinst/index.html.markdown
deleted file mode 100755
index d0e39b049..000000000
--- a/website/source/docs/providers/spotinst/index.html.markdown
+++ /dev/null
@@ -1,43 +0,0 @@
----
-layout: "spotinst"
-page_title: "Provider: Spotinst"
-sidebar_current: "docs-spotinst-index"
-description: |-
- The Spotinst provider is used to interact with the resources supported by Spotinst. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# Spotinst Provider
-
-The Spotinst provider is used to interact with the
-resources supported by Spotinst. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the Spotinst provider
-provider "spotinst" {
- email = "${var.spotinst_email}"
- password = "${var.spotinst_password}"
- client_id = "${var.spotinst_client_id}"
- client_secret = "${var.spotinst_client_secret}"
- token = "${var.spotinst_token}"
-}
-
-# Create an AWS group
-resource "spotinst_aws_group" "foo" {
- ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `email` - (Required) The email registered in Spotinst. It must be provided, but it can also be sourced from the `SPOTINST_EMAIL` environment variable.
-* `password` - (Optional; Required if not using `token`) The password associated with the username. It can be sourced from the `SPOTINST_PASSWORD` environment variable.
-* `client_id` - (Optional; Required if not using `token`) The OAuth client ID associated with the username. It can be sourced from the `SPOTINST_CLIENT_ID` environment variable.
-* `client_secret` - (Optional; Required if not using `token`) The OAuth client secret associated with the username. It can be sourced from the `SPOTINST_CLIENT_SECRET` environment variable.
-* `token` - (Optional; Required if not using `password`) A Personal API Access Token issued by Spotinst. It can be sourced from the `SPOTINST_TOKEN` environment variable.
diff --git a/website/source/docs/providers/spotinst/r/aws_group.html.markdown b/website/source/docs/providers/spotinst/r/aws_group.html.markdown
deleted file mode 100755
index b7c786141..000000000
--- a/website/source/docs/providers/spotinst/r/aws_group.html.markdown
+++ /dev/null
@@ -1,245 +0,0 @@
----
-layout: "spotinst"
-page_title: "Spotinst: aws_group"
-sidebar_current: "docs-do-resource-aws_group"
-description: |-
- Provides a Spotinst AWS group resource.
----
-
-# spotinst_aws_group
-
-Provides a Spotinst AWS group resource.
-
-## Example Usage
-
-```hcl
-# Create an AWS group
-resource "spotinst_aws_group" "workers" {
- name = "workers-group"
- description = "created by Terraform"
- product = "Linux/UNIX"
-
- capacity {
- target = 50
- minimum = 25
- maximum = 100
- }
-
- strategy {
- risk = 100
- }
-
- scheduled_task {
- task_type = "scale"
- cron_expression = "0 5 * * 0-4"
- scale_target_capacity = 80
- }
-
- scheduled_task {
- task_type = "backup_ami"
- frequency = "hourly"
- }
-
- instance_types {
- ondemand = "c3.large"
- spot = ["m3.large", "m4.large", "c3.large", "c4.large"]
- }
-
- availability_zone {
- name = "us-west-2b"
- subnet_id = "subnet-7bbbf51e"
- }
-
- launch_specification {
- monitoring = false
- image_id = "ami-f0091d91"
- key_pair = "pemfile"
- security_group_ids = ["default", "allow-ssh"]
- }
-
- tags {
- foo = "bar"
- bar = "baz"
- }
-
- scaling_up_policy {
- policy_name = "Scaling Policy 1"
- metric_name = "CPUUtilization"
- statistic = "average"
- unit = "percent"
- threshold = 80
- adjustment = 1
- namespace = "AWS/EC2"
- period = 300
- evaluation_periods = 2
- cooldown = 300
- }
-
- scaling_down_policy {
- policy_name = "Scaling Policy 2"
- metric_name = "CPUUtilization"
- statistic = "average"
- unit = "percent"
- threshold = 40
- adjustment = 1
- namespace = "AWS/EC2"
- period = 300
- evaluation_periods = 2
- cooldown = 300
- }
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Optional) The group name.
-* `description` - (Optional) The group description.
-* `product` - (Required) Operation system type.
-* `capacity` - (Required) The group capacity. Only a single block is allowed.
-
- * `target` - (Required) The desired number of instances the group should have at any time.
- * `minimum` - (Optional; Required if using scaling policies) The minimum number of instances the group should have at any time.
- * `maximum` - (Optional; Required if using scaling policies) The maximum number of instances the group should have at any time.
-
-* `strategy` - (Required) This determines how your group request is fulfilled from the possible On-Demand and Spot pools selected for launch. Only a single block is allowed.
-
- * `risk` - (Optional; Required if not using `ondemand_count`) The percentage of Spot instances that would spin up from the `capacity.target` number.
- * `ondemand_count` - (Optional; Required if not using `risk`) Number of on demand instances to launch in the group. All other instances will be spot instances. When this parameter is set the "risk" parameter is being ignored.
- * `availability_vs_cost` - (Optional) The percentage of Spot instances that would spin up from the `capacity.target` number.
- * `draining_timeout` - (Optional) The time in seconds, the instance is allowed to run while detached from the ELB. This is to allow the instance time to be drained from incoming TCP connections before terminating it, during a scale down operation.
-
-* `instance_types` - The type of instance determines your instance's CPU capacity, memory and storage (e.g., m1.small, c1.xlarge).
-
- * `ondemand` - (Required) The base instance type.
- * `spot` - (Required) One or more instance types.
-
-* `launch_specification` - (Required) Describes the launch specification for an instance.
-
- * `image_id` - (Required) The ID of the AMI used to launch the instance.
- * `key_pair` - (Optional) The key name that should be used for the instance.
- * `security_group_ids` - (Optional) A list of associated security group IDS.
- * `monitoring` - (Optional) Indicates whether monitoring is enabled for the instance.
- * `user_data` - (Optional) The user data to provide when launching the instance.
- * `iam_instance_profile` - (Optional) The ARN of an IAM instance profile to associate with launched instances.
- * `load_balancer_names` - (Optional) Registers each instance with the specified Elastic Load Balancers.
-
-* `tags` - (Optional) A mapping of tags to assign to the resource.
-* `elastic_ips` - (Optional) A list of [AWS Elastic IP](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/elastic-ip-addresses-eip.html) allocation IDs to associate to the group instances.
-
-
-
-## Availability Zone
-
-Each `availability_zone` supports the following:
-
-* `name` - The name of the availability zone.
-* `subnet_id` - (Optional) A specific subnet ID within the given availability zone. If not specified, the default subnet will be used.
-
-
-
-## Scheduled Tasks
-
-Each `scheduled_task` supports the following:
-
-* `task_type` - (Required) The task type to run. Supported task types are `scale` and `backup_ami`.
-* `cron_expression` - (Optional; Required if not using `frequency`) A valid cron expression. The cron is running in UTC time zone and is in [Unix cron format](https://en.wikipedia.org/wiki/Cron).
-* `frequency` - (Optional; Required if not using `cron_expression`) The recurrence frequency to run this task. Supported values are `hourly`, `daily` and `weekly`.
-* `scale_target_capcity` - (Optional) The desired number of instances the group should have.
-* `scale_min_capcity` - (Optional) The minimum number of instances the group should have.
-* `scale_max_capcity` - (Optional) The maximum number of instances the group should have.
-
-
-
-## Scaling Policies
-
-Each `scaling_*_policy` supports the following:
-
-* `namespace` - (Required) The namespace for the alarm's associated metric.
-* `metric_name` - (Required) The name of the metric, with or without spaces.
-* `threshold` - (Required) The value against which the specified statistic is compared.
-* `policy_name` - (Optional) The name of the policy.
-* `statistic` - (Optional) The metric statistics to return. For information about specific statistics go to [Statistics](http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/index.html?CHAP_TerminologyandKeyConcepts.html#Statistic) in the Amazon CloudWatch Developer Guide.
-* `unit` - (Optional) The unit for the alarm's associated metric.
-* `adjustment` - (Optional) The number of instances to add/remove to/from the target capacity when scale is needed.
-* `period` - (Optional) The granularity, in seconds, of the returned datapoints. Period must be at least 60 seconds and must be a multiple of 60.
-* `evaluation_periods` - (Optional) The number of periods over which data is compared to the specified threshold.
-* `cooldown` - (Optional) The amount of time, in seconds, after a scaling activity completes and before the next scaling activity can start. If this parameter is not specified, the default cooldown period for the group applies.
-* `dimensions` - (Optional) A mapping of dimensions describing qualities of the metric.
-
-
-
-## Network Interfaces
-
-Each of the `network_interface` attributes controls a portion of the AWS
-Instance's "Elastic Network Interfaces". It's a good idea to familiarize yourself with [AWS's Elastic Network
-Interfaces docs](http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html)
-to understand the implications of using these attributes.
-
-* `network_interface_id` - (Optional) The ID of the network interface.
-* `device_index` - (Optional) The index of the device on the instance for the network interface attachment.
-* `subnet_id` - (Optional) The ID of the subnet associated with the network interface.
-* `description` - (Optional) The description of the network interface.
-* `private_ip_address` - (Optional) The private IP address of the network interface.
-* `security_group_ids` - (Optional) The IDs of the security groups for the network interface.
-* `delete_on_termination` - (Optional) If set to true, the interface is deleted when the instance is terminated.
-* `secondary_private_ip_address_count` - (Optional) The number of secondary private IP addresses.
-* `associate_public_ip_address` - (Optional) Indicates whether to assign a public IP address to an instance you launch in a VPC. The public IP address can only be assigned to a network interface for eth0, and can only be assigned to a new network interface, not an existing one.
-
-
-
-## Block Devices
-
-Each of the `*_block_device` attributes controls a portion of the AWS
-Instance's "Block Device Mapping". It's a good idea to familiarize yourself with [AWS's Block Device
-Mapping docs](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html)
-to understand the implications of using these attributes.
-
-Each `ebs_block_device` supports the following:
-
-* `device_name` - The name of the device to mount.
-* `snapshot_id` - (Optional) The Snapshot ID to mount.
-* `volume_type` - (Optional) The type of volume. Can be `"standard"`, `"gp2"`, or `"io1"`.
-* `volume_size` - (Optional) The size of the volume in gigabytes.
-* `iops` - (Optional) The amount of provisioned
- [IOPS](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ebs-io-characteristics.html).
- This must be set with a `volume_type` of `"io1"`.
-* `delete_on_termination` - (Optional) Whether the volume should be destroyed on instance termination.
-* `encrypted` - (Optional) Enables [EBS encryption](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html) on the volume.
-
-Modifying any `ebs_block_device` currently requires resource replacement.
-
-Each `ephemeral_block_device` supports the following:
-
-* `device_name` - The name of the block device to mount on the instance.
-* `virtual_name` - The [Instance Store Device Name](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/InstanceStorage.html#InstanceStoreDeviceNames)
- (e.g. `"ephemeral0"`).
-
-~> **NOTE:** Currently, changes to `*_block_device` configuration of _existing_
-resources cannot be automatically detected by Terraform. After making updates
-to block device configuration, resource recreation can be manually triggered by
-using the [`taint` command](/docs/commands/taint.html).
-
-
-
-## Third-Party Integrations
-
-* `rancher_integration` - (Optional) Describes the [Rancher](http://rancherlabs.com/) integration.
-
- * `master_host` - (Required) The URL of the Rancher Master host.
- * `access_key` - (Required) The access key of the Rancher API.
- * `secret_key` - (Required) The secret key of the Rancher API.
-
-* `elastic_beanstalk_integration` - (Optional) Describes the [Elastic Beanstalk](https://aws.amazon.com/documentation/elastic-beanstalk/) integration.
-
- * `environment_id` - (Required) The ID of the Elastic Beanstalk environment.
-
-* `nirmata_integration` - (Optional) Describes the [Nirmata](http://nirmata.io/) integration.
-
- * `api_key` - (Required) The API key of the Nirmata API.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The group ID.
diff --git a/website/source/docs/providers/statuscake/index.html.markdown b/website/source/docs/providers/statuscake/index.html.markdown
deleted file mode 100644
index d25a86654..000000000
--- a/website/source/docs/providers/statuscake/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "statuscake"
-page_title: "Provider: StatusCake"
-sidebar_current: "docs-statuscake-index"
-description: |-
- The StatusCake provider configures tests in StatusCake.
----
-
-# StatusCake Provider
-
-The StatusCake provider allows Terraform to create and configure tests in [StatusCake](https://www.statuscake.com/). StatusCake is a tool that helps to
-monitor the uptime of your service via a network of monitoring centers throughout the world.
-
-The provider configuration block accepts the following arguments:
-
-* ``username`` - (Required) The username for the statuscake account. May alternatively be set via the
- ``STATUSCAKE_USERNAME`` environment variable.
-
-* ``apikey`` - (Required) The API auth token to use when making requests. May alternatively
- be set via the ``STATUSCAKE_APIKEY`` environment variable.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "statuscake" {
- username = "testuser"
- apikey = "12345ddfnakn"
-}
-
-resource "statuscake_test" "google" {
- website_name = "google.com"
- website_url = "www.google.com"
- test_type = "HTTP"
- check_rate = 300
- contact_id = 12345
-}
-```
diff --git a/website/source/docs/providers/statuscake/r/test.html.markdown b/website/source/docs/providers/statuscake/r/test.html.markdown
deleted file mode 100644
index 0a1df9f12..000000000
--- a/website/source/docs/providers/statuscake/r/test.html.markdown
+++ /dev/null
@@ -1,45 +0,0 @@
----
-layout: "statuscake"
-page_title: "StatusCake: statuscake_test"
-sidebar_current: "docs-statuscake-test"
-description: |-
- The statuscake_test resource allows StatusCake tests to be managed by Terraform.
----
-
-# statuscake\_test
-
-The test resource allows StatusCake tests to be managed by Terraform.
-
-## Example Usage
-
-```hcl
-resource "statuscake_test" "google" {
- website_name = "google.com"
- website_url = "www.google.com"
- test_type = "HTTP"
- check_rate = 300
- contact_id = 12345
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `website_name` - (Required) This is the name of the test and the website to be monitored.
-* `website_url` - (Required) The URL of the website to be monitored
-* `check_rate` - (Optional) Test check rate in seconds. Defaults to 300
-* `contact_id` - (Optional) The ID of the contact group to be added to the test. Each test can have only one.
-* `test_type` - (Required) The type of Test. Either HTTP or TCP
-* `paused` - (Optional) Whether or not the test is paused. Defaults to false.
-* `timeout` - (Optional) The timeout of the test in seconds.
-* `confirmations` - (Optional) The number of confirmation servers to use in order to detect downtime. Defaults to 0.
-* `port` - (Optional) The port to use when specifying a TCP test.
-* `trigger_rate` - (Optional) The number of minutes to wait before sending an alert. Default is `5`.
-
-
-## Attributes Reference
-
-The following attribute is exported:
-
-* `test_id` - A unique identifier for the test.
diff --git a/website/source/docs/providers/template/d/cloudinit_config.html.markdown b/website/source/docs/providers/template/d/cloudinit_config.html.markdown
deleted file mode 100644
index 6e6297a68..000000000
--- a/website/source/docs/providers/template/d/cloudinit_config.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "template"
-page_title: "Template: cloudinit_multipart"
-sidebar_current: "docs-template-datasource-cloudinit-config"
-description: |-
- Renders a multi-part cloud-init config from source files.
----
-
-# template_cloudinit_config
-
-Renders a multi-part cloud-init config from source files.
-
-## Example Usage
-
-```hcl
-# Render a part using a `template_file`
-data "template_file" "script" {
- template = "${file("${path.module}/init.tpl")}"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-
-# Render a multi-part cloudinit config making use of the part
-# above, and other source files
-data "template_cloudinit_config" "config" {
- gzip = true
- base64_encode = true
-
- # Setup hello world script to be called by the cloud-config
- part {
- filename = "init.cfg"
- content_type = "text/part-handler"
- content = "${data.template_file.script.rendered}"
- }
-
- part {
- content_type = "text/x-shellscript"
- content = "baz"
- }
-
- part {
- content_type = "text/x-shellscript"
- content = "ffbaz"
- }
-}
-
-# Start an AWS instance with the cloudinit config as user data
-resource "aws_instance" "web" {
- ami = "ami-d05e75b8"
- instance_type = "t2.micro"
- user_data = "${data.template_cloudinit_config.config.rendered}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `gzip` - (Optional) Specify whether or not to gzip the rendered output. Defaults to `true`
-
-* `base64_encode` - (Optional) Base64 encoding of the rendered output. Defaults to `true`
-
-* `part` - (Required) One may specify this block multiple times; each creates a fragment of the rendered cloud-init config file. The order of the parts in the configuration is maintained in the rendered template.
-
-The `part` block supports:
-
-* `filename` - (Optional) Filename to save part as.
-
-* `content_type` - (Optional) Content type to send file as.
-
-* `content` - (Required) Body for the part.
-
-* `merge_type` - (Optional) Gives the ability to merge multiple blocks of cloud-config together.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `rendered` - The final rendered multi-part cloudinit config.
diff --git a/website/source/docs/providers/template/d/file.html.md b/website/source/docs/providers/template/d/file.html.md
deleted file mode 100644
index c4be59fb5..000000000
--- a/website/source/docs/providers/template/d/file.html.md
+++ /dev/null
@@ -1,137 +0,0 @@
----
-layout: "template"
-page_title: "Template: template_file"
-sidebar_current: "docs-template-datasource-file"
-description: |-
- Renders a template from a file.
----
-
-# template_file
-
-Renders a template from a file.
-
-## Example Usage
-
-Option 1: From a file:
-
-Reference the template path:
-
-```hcl
-data "template_file" "init" {
- template = "${file("${path.module}/init.tpl")}"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-```
-
-Inside the file, reference the variable as such:
-
-```bash
-#!/bin/bash
-
-echo "CONSUL_ADDRESS = ${consul_address}" > /tmp/iplist
-```
-
-Option 2: Inline:
-
-```hcl
-data "template_file" "init" {
- template = "$${consul_address}:1234"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `template` - (Required) The contents of the template. These can be loaded
- from a file on disk using the [`file()` interpolation
- function](/docs/configuration/interpolation.html#file_path_).
-
-* `vars` - (Optional) Variables for interpolation within the template. Note
- that variables must all be primitives. Direct references to lists or maps
- will cause a validation error.
-
-The following arguments are maintained for backwards compatibility and may be
-removed in a future version:
-
-* `filename` - _Deprecated, please use `template` instead_. The filename for
- the template. Use [path variables](/docs/configuration/interpolation.html#path-variables) to make
- this path relative to different path roots.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `template` - See Argument Reference above.
-* `vars` - See Argument Reference above.
-* `rendered` - The final rendered template.
-
-## Template Syntax
-
-The syntax of the template files is the same as
-[standard interpolation syntax](/docs/configuration/interpolation.html),
-but you only have access to the variables defined in the `vars` section.
-
-To access interpolations that are normally available to Terraform
-configuration (such as other variables, resource attributes, module
-outputs, etc.) you'll have to expose them via `vars` as shown below:
-
-```hcl
-data "template_file" "init" {
- # ...
-
- vars {
- foo = "${var.foo}"
- attr = "${aws_instance.foo.private_ip}"
- }
-}
-```
-
-## Inline Templates
-
-Inline templates allow you to specify the template string inline without
-loading a file. An example is shown below:
-
-```hcl
-data "template_file" "init" {
- template = "$${consul_address}:1234"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-```
-
--> **Important:** Template variables in an inline template (such as
-`consul_address` above) must be escaped with a double-`$`. Unescaped
-interpolations will be processed by Terraform normally prior to executing
-the template.
-
-An example of mixing escaped and non-escaped interpolations in a template:
-
-```hcl
-variable "port" { default = 80 }
-
-data "template_file" "init" {
- template = "$${foo}:${var.port}"
-
- vars {
- foo = "${count.index}"
- }
-}
-```
-
-In the above example, the template is processed by Terraform first to
-turn it into: `${foo}:80`. After that, the template is processed as a
-template to interpolate `foo`.
-
-In general, you should use template variables in the `vars` block and try
-not to mix interpolations. This keeps it understandable and has the benefit
-that you don't have to change anything to switch your template to a file.
diff --git a/website/source/docs/providers/template/index.html.markdown b/website/source/docs/providers/template/index.html.markdown
deleted file mode 100644
index 22d715c2f..000000000
--- a/website/source/docs/providers/template/index.html.markdown
+++ /dev/null
@@ -1,58 +0,0 @@
----
-layout: "template"
-page_title: "Provider: Template"
-sidebar_current: "docs-template-index"
-description: |-
- The Template provider is used to template strings for other Terraform resources.
----
-
-# Template Provider
-
-The template provider exposes data sources to use templates to generate
-strings for other Terraform resources or outputs.
-
-Use the navigation to the left to read about the available data sources.
-
-## Example Usage
-
-```hcl
-# Template for initial configuration bash script
-data "template_file" "init" {
- template = "${file("init.tpl")}"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-
-# Create a web server
-resource "aws_instance" "web" {
- # ...
-
- user_data = "${data.template_file.init.rendered}"
-}
-```
-
-Or using an inline template:
-
-```hcl
-# Template for initial configuration bash script
-data "template_file" "init" {
- template = "$${consul_address}:1234"
-
- vars {
- consul_address = "${aws_instance.consul.private_ip}"
- }
-}
-
-# Create a web server
-resource "aws_instance" "web" {
- # ...
-
- user_data = "${data.template_file.init.rendered}"
-}
-```
-
--> **Note:** Inline templates must escape their interpolations (as seen
-by the double `$` above). Unescaped interpolations will be processed
-_before_ the template.
diff --git a/website/source/docs/providers/template/r/dir.html.md b/website/source/docs/providers/template/r/dir.html.md
deleted file mode 100644
index 9ce700826..000000000
--- a/website/source/docs/providers/template/r/dir.html.md
+++ /dev/null
@@ -1,116 +0,0 @@
----
-layout: "template"
-page_title: "Template: template_dir"
-sidebar_current: "docs-template-resource-dir"
-description: |-
- Renders a directory of templates.
----
-
-# template_dir
-
-Renders a directory containing templates into a separate directory of
-corresponding rendered files.
-
-`template_dir` is similar to [`template_file`](../d/file.html) but it walks
-a given source directory and treats every file it encounters as a template,
-rendering it to a corresponding file in the destination directory.
-
-~> **Note** When working with local files, Terraform will detect the resource
-as having been deleted each time a configuration is applied on a new machine
-where the destination dir is not present and will generate a diff to create
-it. This may cause "noise" in diffs in environments where configurations are
-routinely applied by many different users or within automation systems.
-
-## Example Usage
-
-The following example shows how one might use this resource to produce a
-directory of configuration files to upload to a compute instance, using
-Amazon EC2 as a placeholder.
-
-```hcl
-resource "template_dir" "config" {
- source_dir = "${path.module}/instance_config_templates"
- destination_dir = "${path.cwd}/instance_config"
-
- vars {
- consul_addr = "${var.consul_addr}"
- }
-}
-
-resource "aws_instance" "server" {
- ami = "${var.server_ami}"
- instance_type = "t2.micro"
-
- connection {
- # ...connection configuration...
- }
-
- provisioner "file" {
- # Referencing the template_dir resource ensures that it will be
- # created or updated before this aws_instance resource is provisioned.
- source = "${template_dir.config.destination_dir}"
- destination = "/etc/myapp"
- }
-}
-
-variable "consul_addr" {}
-
-variable "server_ami" {}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `source_dir` - (Required) Path to the directory where the files to template reside.
-
-* `destination_dir` - (Required) Path to the directory where the templated files will be written.
-
-* `vars` - (Optional) Variables for interpolation within the template. Note
- that variables must all be primitives. Direct references to lists or maps
- will cause a validation error.
-
-Any required parent directories of `destination_dir` will be created
-automatically, and any pre-existing file or directory at that location will
-be deleted before template rendering begins.
-
-After rendering this resource remembers the content of both the source and
-destination directories in the Terraform state, and will plan to recreate the
-output directory if any changes are detected during the plan phase.
-
-Note that it is _not_ safe to use the `file` interpolation function to read
-files created by this resource, since that function can be evaluated before the
-destination directory has been created or updated. It *is* safe to use the
-generated files with resources that directly take filenames as arguments,
-as long as the path is constructed using the `destination_dir` attribute
-to create a dependency relationship with the `template_dir` resource.
-
-## Template Syntax
-
-The syntax of the template files is the same as
-[standard interpolation syntax](/docs/configuration/interpolation.html),
-but you only have access to the variables defined in the `vars` section.
-
-To access interpolations that are normally available to Terraform
-configuration (such as other variables, resource attributes, module
-outputs, etc.) you can expose them via `vars` as shown below:
-
-```hcl
-resource "template_dir" "init" {
- # ...
-
- vars {
- foo = "${var.foo}"
- attr = "${aws_instance.foo.private_ip}"
- }
-}
-```
-
-## Attributes
-
-This resource exports the following attributes:
-
-* `destination_dir` - The destination directory given in configuration.
- Interpolate this attribute into other resource configurations to create
- a dependency to ensure that the destination directory is populated before
- another resource attempts to read it.
diff --git a/website/source/docs/providers/terraform-enterprise/d/artifact.html.markdown b/website/source/docs/providers/terraform-enterprise/d/artifact.html.markdown
deleted file mode 100644
index e525ff3c4..000000000
--- a/website/source/docs/providers/terraform-enterprise/d/artifact.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "terraform-enterprise"
-page_title: "Terraform Enterprise: atlas_artifact"
-sidebar_current: "docs-terraform-enterprise-data-artifact"
-description: |-
- Provides a data source to deployment artifacts managed by Terraform Enterprise. This can
- be used to dynamically configure instantiation and provisioning
- of resources.
----
-
-# atlas_artifact
-
-Provides a [Data Source](/docs/configuration/data-sources.html) to access deployment
-artifacts managed by Terraform Enterprise. This can be used to dynamically configure instantiation
-and provisioning of resources.
-
-## Example Usage
-
-An artifact can be created that has metadata representing
-an AMI in AWS. This AMI can be used to configure an instance. Any changes
-to this artifact will trigger a change to that instance.
-
-```hcl
-# Read the AMI
-data "atlas_artifact" "web" {
- name = "hashicorp/web"
- type = "amazon.image"
- build = "latest"
-
- metadata {
- arch = "386"
- }
-}
-
-# Start our instance with the dynamic ami value
-# Remember to include the AWS region as it is part of the full ID
-resource "aws_instance" "app" {
- ami = "${data.atlas_artifact.web.metadata_full.region-us-east-1}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Name of the artifact in Terraform Enterprise. This is given
- in slug format like "organization/artifact".
-
-* `type` - (Required) The type of artifact to query for.
-
-* `build` - (Optional) The build number responsible for creating
- the version of the artifact to filter on. This can be "latest",
- to find a matching artifact in the latest build, "any" to find a
- matching artifact in any build, or a specific number to pin to that
- build. If `build` and `version` are unspecified, `version` will default
- to "latest". Cannot be specified with `version`. Note: `build` is only
- present if Terraform Enterprise builds the image.
-
-* `version` - (Optional) The version of the artifact to filter on. This can
- be "latest", to match against the latest version, "any" to find a matching artifact
- in any version, or a specific number to pin to that version. Defaults to
- "latest" if neither `build` or `version` is specified. Cannot be specified
- with `build`.
-
-* `metadata_keys` - (Optional) If given, only an artifact containing
- the given keys will be returned. This is used to disambiguate when
- multiple potential artifacts match. An example is "aws" to filter
- on an AMI.
-
-* `metadata` - (Optional) If given, only an artifact matching the
- metadata filters will be returned. This is used to disambiguate when
- multiple potential artifacts match. An example is "arch" = "386" to
- filter on architecture.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the artifact. This could be an AMI ID, GCE Image ID, etc.
-* `file_url` - For artifacts that are binaries, this is a download path.
-* `metadata_full` - Contains the full metadata of the artifact. The keys are sanitized
- to replace any characters that are invalid in a resource name with a hyphen.
- For example, the "region.us-east-1" key will become "region-us-east-1".
-* `version_real` - The matching version of the artifact
-* `slug` - The artifact slug in Terraform Enterprise
diff --git a/website/source/docs/providers/terraform-enterprise/index.html.markdown b/website/source/docs/providers/terraform-enterprise/index.html.markdown
deleted file mode 100644
index 1793e5828..000000000
--- a/website/source/docs/providers/terraform-enterprise/index.html.markdown
+++ /dev/null
@@ -1,50 +0,0 @@
----
-layout: "terraform-enterprise"
-page_title: "Provider: Terraform Enterprise"
-sidebar_current: "docs-terraform-enterprise-index"
-description: |-
- The Terraform Enterprise provider is used to interact with configuration,
- artifacts, and metadata managed by the Terraform Enterprise service.
----
-
-# Terraform Enterprise Provider
-
-The Terraform Enterprise provider is used to interact with resources,
-configuration, artifacts, and metadata managed by
-[Terraform Enterprise](https://www.terraform.io/docs/providers/index.html).
-The provider needs to be configured with the proper credentials before it can
-be used.
-
-Use the navigation to the left to read about the available resources.
-
-~> **Why is this called "atlas"?** Atlas was previously a commercial offering
-from HashiCorp that included a full suite of enterprise products. The products
-have since been broken apart into their individual products, like **Terraform
-Enterprise**. While this transition is in progress, you may see references to
-"atlas" in the documentation. We apologize for the inconvenience.
-
-## Example Usage
-
-```hcl
-# Configure the Terraform Enterprise provider
-provider "atlas" {
- token = "${var.atlas_token}"
-}
-
-# Fetch an artifact configuration
-data "atlas_artifact" "web" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `address` - (Optional) Terraform Enterprise server endpoint. Defaults to
- public Terraform Enterprise. This is only required when using an on-premise
- deployment of Terraform Enterprise. This can also be specified with the
- `ATLAS_ADDRESS` shell environment variable.
-
-* `token` - (Required) API token. This can also be specified with the
- `ATLAS_TOKEN` shell environment variable.
diff --git a/website/source/docs/providers/terraform-enterprise/r/artifact.html.markdown b/website/source/docs/providers/terraform-enterprise/r/artifact.html.markdown
deleted file mode 100644
index 42d820929..000000000
--- a/website/source/docs/providers/terraform-enterprise/r/artifact.html.markdown
+++ /dev/null
@@ -1,92 +0,0 @@
----
-layout: "terraform-enterprise"
-page_title: "Terraform Enterprise: atlas_artifact"
-sidebar_current: "docs-terraform-enterprise-resource-artifact"
-description: |-
- Provides access to deployment artifacts managed by Terraform Enterprise. This
- can be used to dynamically configure instantiation and provisioning of
- resources.
----
-
-# atlas_artifact
-
-Provides access to deployment artifacts managed by Terraform Enterprise. This
-can be used to dynamically configure instantiation and provisioning of
-resources.
-
-
-~> **This resource is deprecated!** Please use the
-[Artifact Data Source](/docs/providers/terraform-enterprise/d/artifact.html)
-
-## Example Usage
-
-An artifact can be created that has metadata representing
-an AMI in AWS. This AMI can be used to configure an instance. Any changes
-to this artifact will trigger a change to that instance.
-
-```hcl
-# Read the AMI
-resource "atlas_artifact" "web" {
- name = "hashicorp/web"
- type = "amazon.image"
- build = "latest"
-
- metadata {
- arch = "386"
- }
-}
-
-# Start our instance with the dynamic ami value
-# Remember to include the AWS region as it is part of the full ID
-resource "aws_instance" "app" {
- ami = "${atlas_artifact.web.metadata_full.region-us-east-1}"
-
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) Name of the artifact in Terraform Enterprise. This is given
- in slug format like "organization/artifact".
-
-* `type` - (Required) The type of artifact to query for.
-
-* `build` - (Optional) The build number responsible for creating
- the version of the artifact to filter on. This can be "latest",
- to find a matching artifact in the latest build, "any" to find a
- matching artifact in any build, or a specific number to pin to that
- build. If `build` and `version` are unspecified, `version` will default
- to "latest". Cannot be specified with `version`. Note: `build` is only
- present if Terraform Enterprise builds the image.
-
-* `version` - (Optional) The version of the artifact to filter on. This can
- be "latest", to match against the latest version, "any" to find a matching artifact
- in any version, or a specific number to pin to that version. Defaults to
- "latest" if neither `build` nor `version` is specified. Cannot be specified
- with `build`.
-
-* `metadata_keys` - (Optional) If given, only an artifact containing
- the given keys will be returned. This is used to disambiguate when
- multiple potential artifacts match. An example is "aws" to filter
- on an AMI.
-
-* `metadata` - (Optional) If given, only an artifact matching the
- metadata filters will be returned. This is used to disambiguate when
- multiple potential artifacts match. An example is "arch" = "386" to
- filter on architecture.
-
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The ID of the artifact. This could be an AMI ID, GCE Image ID, etc.
-* `file_url` - For artifacts that are binaries, this is a download path.
-* `metadata_full` - Contains the full metadata of the artifact. The keys are sanitized
- to replace any characters that are invalid in a resource name with a hyphen.
- For example, the "region.us-east-1" key will become "region-us-east-1".
-* `version_real` - The matching version of the artifact
-* `slug` - The artifact slug in Terraform Enterprise
diff --git a/website/source/docs/providers/terraform/d/remote_state.html.md b/website/source/docs/providers/terraform/d/remote_state.html.md
deleted file mode 100644
index 33d138d6e..000000000
--- a/website/source/docs/providers/terraform/d/remote_state.html.md
+++ /dev/null
@@ -1,69 +0,0 @@
----
-layout: "terraform"
-page_title: "Terraform: terraform_remote_state"
-sidebar_current: "docs-terraform-datasource-remote-state"
-description: |-
- Accesses state meta data from a remote backend.
----
-
-# remote_state
-
-Retrieves state meta data from a remote backend
-
-## Example Usage
-
-```hcl
-data "terraform_remote_state" "vpc" {
- backend = "atlas"
- config {
- name = "hashicorp/vpc-prod"
- }
-}
-
-resource "aws_instance" "foo" {
- # ...
- subnet_id = "${data.terraform_remote_state.vpc.subnet_id}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `backend` - (Required) The remote backend to use.
-* `workspace` - (Optional) The Terraform workspace whose state will be requested. Defaults to "default".
-* `config` - (Optional) The configuration of the remote backend. For more information,
- see [the Backend Types documentation](/docs/backends/types/).
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `backend` - See Argument Reference above.
-* `config` - See Argument Reference above.
-
-In addition, each output in the remote state appears as a top level attribute
-on the `terraform_remote_state` resource.
-
-## Root Outputs Only
-
-Only the root level outputs from the remote state are accessible. Outputs from
-modules within the state cannot be accessed. If you want a module output to be
-accessible via a remote state, you must thread the output through to a root
-output.
-
-An example is shown below:
-
-```hcl
-module "app" {
- source = "..."
-}
-
-output "app_value" {
- value = "${module.app.value}"
-}
-```
-
-In this example, the output `value` from the "app" module is available as
-"app_value". If this root level output hadn't been created, then a remote state
-resource wouldn't be able to access the `value` output on the module.
diff --git a/website/source/docs/providers/terraform/index.html.markdown b/website/source/docs/providers/terraform/index.html.markdown
deleted file mode 100644
index f0b7784a0..000000000
--- a/website/source/docs/providers/terraform/index.html.markdown
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "terraform"
-page_title: "Provider: Terraform"
-sidebar_current: "docs-terraform-index"
-description: |-
- The Terraform provider is used to access meta data from shared infrastructure.
----
-
-# Terraform Provider
-
-The terraform provider provides access to outputs from the Terraform state
-of shared infrastructure.
-
-Use the navigation to the left to read about the available data sources.
-
-## Example Usage
-
-```hcl
-# Shared infrastructure state stored in Atlas
-data "terraform_remote_state" "vpc" {
- backend = "atlas"
-
- config {
- name = "hashicorp/vpc-prod"
- }
-}
-
-resource "aws_instance" "foo" {
- # ...
- subnet_id = "${data.terraform_remote_state.vpc.subnet_id}"
-}
-```
diff --git a/website/source/docs/providers/tls/index.html.markdown b/website/source/docs/providers/tls/index.html.markdown
deleted file mode 100644
index 57f962e64..000000000
--- a/website/source/docs/providers/tls/index.html.markdown
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "tls"
-page_title: "Provider: TLS"
-sidebar_current: "docs-tls-index"
-description: |-
- The TLS provider provides utilities for working with Transport Layer Security keys and certificates.
----
-
-# TLS Provider
-
-The TLS provider provides utilities for working with *Transport Layer Security*
-keys and certificates. It provides resources that
-allow private keys, certificates and certficate requests to be
-created as part of a Terraform deployment.
-
-Another name for Transport Layer Security is *Secure Sockets Layer*,
-or SSL. TLS and SSL are equivalent when considering the resources
-managed by this provider.
-
-This provider is not particularly useful on its own, but it can be
-used to create certificates and credentials that can then be used
-with other providers when creating resources that expose TLS
-services or that themselves provision TLS certificates.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-## This example create a self-signed certificate for a development
-## environment.
-## THIS IS NOT RECOMMENDED FOR PRODUCTION SERVICES.
-## See the detailed documentation of each resource for further
-## security considerations and other practical tradeoffs.
-
-resource "tls_private_key" "example" {
- algorithm = "ECDSA"
-}
-
-resource "tls_self_signed_cert" "example" {
- key_algorithm = "${tls_private_key.example.algorithm}"
- private_key_pem = "${tls_private_key.example.private_key_pem}"
-
- # Certificate expires after 12 hours.
- validity_period_hours = 12
-
- # Generate a new certificate if Terraform is run within three
- # hours of the certificate's expiration time.
- early_renewal_hours = 3
-
- # Reasonable set of uses for a server SSL certificate.
- allowed_uses = [
- "key_encipherment",
- "digital_signature",
- "server_auth",
- ]
-
- dns_names = ["example.com", "example.net"]
-
- subject {
- common_name = "example.com"
- organization = "ACME Examples, Inc"
- }
-}
-
-# For example, this can be used to populate an AWS IAM server certificate.
-resource "aws_iam_server_certificate" "example" {
- name = "example_self_signed_cert"
- certificate_body = "${tls_self_signed_cert.example.cert_pem}"
- private_key = "${tls_private_key.example.private_key_pem}"
-}
-```
diff --git a/website/source/docs/providers/tls/r/cert_request.html.md b/website/source/docs/providers/tls/r/cert_request.html.md
deleted file mode 100644
index 3a71583fd..000000000
--- a/website/source/docs/providers/tls/r/cert_request.html.md
+++ /dev/null
@@ -1,86 +0,0 @@
----
-layout: "tls"
-page_title: "TLS: tls_cert_request"
-sidebar_current: "docs-tls-data-source-cert-request"
-description: |-
- Creates a PEM-encoded certificate request.
----
-
-# tls\_cert\_request
-
-Generates a *Certificate Signing Request* (CSR) in PEM format, which is the
-typical format used to request a certificate from a certificate authority.
-
-This resource is intended to be used in conjunction with a Terraform provider
-for a particular certificate authority in order to provision a new certificate.
-This is a *logical resource*, so it contributes only to the current Terraform
-state and does not create any external managed resources.
-
-~> **Compatibility Note** From Terraform 0.7.0 to 0.7.4 this resource was
-converted to a data source, and the resource form of it was deprecated. This
-turned out to be a design error since a cert request includes a random number
-in the form of the signature nonce, and so the data source form of this
-resource caused non-convergent configuration. The data source form is no longer
-supported as of Terraform 0.7.5 and any users should return to using the
-resource form.
-
-## Example Usage
-
-```hcl
-resource "tls_cert_request" "example" {
- key_algorithm = "ECDSA"
- private_key_pem = "${file("private_key.pem")}"
-
- subject {
- common_name = "example.com"
- organization = "ACME Examples, Inc"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `key_algorithm` - (Required) The name of the algorithm for the key provided
-in `private_key_pem`.
-
-* `private_key_pem` - (Required) PEM-encoded private key data. This can be
-read from a separate file using the ``file`` interpolation function. Only
-an irreversable secure hash of the private key will be stored in the Terraform
-state.
-
-* `subject` - (Required) The subject for which a certificate is being requested. This is
-a nested configuration block whose structure is described below.
-
-* `dns_names` - (Optional) List of DNS names for which a certificate is being requested.
-
-* `ip_addresses` - (Optional) List of IP addresses for which a certificate is being requested.
-
-The nested `subject` block accepts the following arguments, all optional, with their meaning
-corresponding to the similarly-named attributes defined in
-[RFC5290](https://tools.ietf.org/html/rfc5280#section-4.1.2.4):
-
-* `common_name` (string)
-
-* `organization` (string)
-
-* `organizational_unit` (string)
-
-* `street_address` (list of strings)
-
-* `locality` (string)
-
-* `province` (string)
-
-* `country` (string)
-
-* `postal_code` (string)
-
-* `serial_number` (string)
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cert_request_pem` - The certificate request data in PEM format.
diff --git a/website/source/docs/providers/tls/r/locally_signed_cert.html.md b/website/source/docs/providers/tls/r/locally_signed_cert.html.md
deleted file mode 100644
index a022c2925..000000000
--- a/website/source/docs/providers/tls/r/locally_signed_cert.html.md
+++ /dev/null
@@ -1,118 +0,0 @@
----
-layout: "tls"
-page_title: "TLS: tls_locally_signed_cert"
-sidebar_current: "docs-tls-resource-locally-signed-cert"
-description: |-
- Creates a locally-signed TLS certificate in PEM format.
----
-
-# tls\_locally\_signed\_cert
-
-Generates a TLS certificate using a *Certificate Signing Request* (CSR) and
-signs it with a provided certificate authority (CA) private key.
-
-Locally-signed certificates are generally only trusted by client software when
-setup to use the provided CA. They are normally used in development environments
-or when deployed internally to an organization.
-
-## Example Usage
-
-```hcl
-resource "tls_locally_signed_cert" "example" {
- cert_request_pem = "${file("cert_request.pem")}"
- ca_key_algorithm = "ECDSA"
- ca_private_key_pem = "${file("ca_private_key.pem")}"
- ca_cert_pem = "${file("ca_cert.pem")}"
-
- validity_period_hours = 12
-
- allowed_uses = [
- "key_encipherment",
- "digital_signature",
- "server_auth",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `cert_request_pem` - (Required) PEM-encoded request certificate data.
-
-* `ca_key_algorithm` - (Required) The name of the algorithm for the key provided
- in `ca_private_key_pem`.
-
-* `ca_private_key_pem` - (Required) PEM-encoded private key data for the CA.
- This can be read from a separate file using the ``file`` interpolation
- function.
-
-* `ca_cert_pem` - (Required) PEM-encoded certificate data for the CA.
-
-* `validity_period_hours` - (Required) The number of hours after initial issuing that the
- certificate will become invalid.
-
-* `allowed_uses` - (Required) List of keywords each describing a use that is permitted
- for the issued certificate. The valid keywords are listed below.
-
-* `early_renewal_hours` - (Optional) If set, the resource will consider the certificate to
- have expired the given number of hours before its actual expiry time. This can be useful
- to deploy an updated certificate in advance of the expiration of the current certificate.
- Note however that the old certificate remains valid until its true expiration time, since
- this resource does not (and cannot) support certificate revocation. Note also that this
- advance update can only be performed should the Terraform configuration be applied during the
- early renewal period.
-
-* `is_ca_certificate` - (Optional) Boolean controlling whether the CA flag will be set in the
- generated certificate. Defaults to `false`, meaning that the certificate does not represent
- a certificate authority.
-
-The `allowed_uses` list accepts the following keywords, combining the set of flags defined by
-both [Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.3) and
-[Extended Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.12) in
-[RFC5280](https://tools.ietf.org/html/rfc5280):
-
-* `digital_signature`
-* `content_commitment`
-* `key_encipherment`
-* `data_encipherment`
-* `key_agreement`
-* `cert_signing`
-* `crl_signing`
-* `encipher_only`
-* `decipher_only`
-* `any_extended`
-* `server_auth`
-* `client_auth`
-* `code_signing`
-* `email_protection`
-* `ipsec_end_system`
-* `ipsec_tunnel`
-* `ipsec_user`
-* `timestamping`
-* `ocsp_signing`
-* `microsoft_server_gated_crypto`
-* `netscape_server_gated_crypto`
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cert_pem` - The certificate data in PEM format.
-* `validity_start_time` - The time after which the certificate is valid, as an
- [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
-* `validity_end_time` - The time until which the certificate is invalid, as an
- [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
-
-## Automatic Renewal
-
-This resource considers its instances to have been deleted after either their validity
-periods ends or the early renewal period is reached. At this time, applying the
-Terraform configuration will cause a new certificate to be generated for the instance.
-
-Therefore in a development environment with frequent deployments it may be convenient
-to set a relatively-short expiration time and use early renewal to automatically provision
-a new certificate when the current one is about to expire.
-
-The creation of a new certificate may of course cause dependent resources to be updated
-or replaced, depending on the lifecycle rules applying to those resources.
diff --git a/website/source/docs/providers/tls/r/private_key.html.md b/website/source/docs/providers/tls/r/private_key.html.md
deleted file mode 100644
index 06d4fc4bc..000000000
--- a/website/source/docs/providers/tls/r/private_key.html.md
+++ /dev/null
@@ -1,72 +0,0 @@
----
-layout: "tls"
-page_title: "TLS: tls_private_key"
-sidebar_current: "docs-tls-resource-private-key"
-description: |-
- Creates a PEM-encoded private key.
----
-
-# tls\_private\_key
-
-Generates a secure private key and encodes it as PEM. This resource is
-primarily intended for easily bootstrapping throwaway development
-environments.
-
-~> **Important Security Notice** The private key generated by this resource will
-be stored *unencrypted* in your Terraform state file. **Use of this resource
-for production deployments is *not* recommended**. Instead, generate
-a private key file outside of Terraform and distribute it securely
-to the system where Terraform will be run.
-
-This is a *logical resource*, so it contributes only to the current Terraform
-state and does not create any external managed resources.
-
-## Example Usage
-
-```hcl
-resource "tls_private_key" "example" {
- algorithm = "ECDSA"
- ecdsa_curve = "P384"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `algorithm` - (Required) The name of the algorithm to use for
-the key. Currently-supported values are "RSA" and "ECDSA".
-
-* `rsa_bits` - (Optional) When `algorithm` is "RSA", the size of the generated
-RSA key in bits. Defaults to 2048.
-
-* `ecdsa_curve` - (Optional) When `algorithm` is "ECDSA", the name of the elliptic
-curve to use. May be any one of "P224", "P256", "P384" or "P521", with "P224" as the
-default.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `algorithm` - The algorithm that was selected for the key.
-* `private_key_pem` - The private key data in PEM format.
-* `public_key_pem` - The public key data in PEM format.
-* `public_key_openssh` - The public key data in OpenSSH `authorized_keys`
- format, if the selected private key format is compatible. All RSA keys
- are supported, and ECDSA keys with curves "P256", "P384" and "P251"
- are supported. This attribute is empty if an incompatible ECDSA curve
- is selected.
-
-## Generating a New Key
-
-Since a private key is a logical resource that lives only in the Terraform state,
-it will persist until it is explicitly destroyed by the user.
-
-In order to force the generation of a new key within an existing state, the
-private key instance can be "tainted":
-
-```
-terraform taint tls_private_key.example
-```
-
-A new key will then be generated on the next ``terraform apply``.
diff --git a/website/source/docs/providers/tls/r/self_signed_cert.html.md b/website/source/docs/providers/tls/r/self_signed_cert.html.md
deleted file mode 100644
index 60cf5bcf1..000000000
--- a/website/source/docs/providers/tls/r/self_signed_cert.html.md
+++ /dev/null
@@ -1,139 +0,0 @@
----
-layout: "tls"
-page_title: "TLS: tls_self_signed_cert"
-sidebar_current: "docs-tls-resource-self-signed-cert"
-description: |-
- Creates a self-signed TLS certificate in PEM format.
----
-
-# tls\_self\_signed\_cert
-
-Generates a *self-signed* TLS certificate in PEM format, which is the typical
-format used to configure TLS server software.
-
-Self-signed certificates are generally not trusted by client software such
-as web browsers. Therefore clients are likely to generate trust warnings when
-connecting to a server that has a self-signed certificate. Self-signed certificates
-are usually used only in development environments or apps deployed internally
-to an organization.
-
-This resource is intended to be used in conjunction with a Terraform provider
-that has a resource that requires a TLS certificate, such as:
-
-* ``aws_iam_server_certificate`` to register certificates for use with AWS *Elastic
-Load Balancer*, *Elastic Beanstalk*, *CloudFront* or *OpsWorks*.
-
-* ``heroku_cert`` to register certificates for applications deployed on Heroku.
-
-## Example Usage
-
-```hcl
-resource "tls_self_signed_cert" "example" {
- key_algorithm = "ECDSA"
- private_key_pem = "${file(\"private_key.pem\")}"
-
- subject {
- common_name = "example.com"
- organization = "ACME Examples, Inc"
- }
-
- validity_period_hours = 12
-
- allowed_uses = [
- "key_encipherment",
- "digital_signature",
- "server_auth",
- ]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `key_algorithm` - (Required) The name of the algorithm for the key provided
- in `private_key_pem`.
-
-* `private_key_pem` - (Required) PEM-encoded private key data. This can be
- read from a separate file using the ``file`` interpolation function. If the
- certificate is being generated to be used for a throwaway development
- environment or other non-critical application, the `tls_private_key` resource
- can be used to generate a TLS private key from within Terraform. Only
- an irreversable secure hash of the private key will be stored in the Terraform
- state.
-
-* `subject` - (Required) The subject for which a certificate is being requested.
- This is a nested configuration block whose structure matches the
- corresponding block for [`tls_cert_request`](cert_request.html).
-
-* `validity_period_hours` - (Required) The number of hours after initial issuing that the
- certificate will become invalid.
-
-* `allowed_uses` - (Required) List of keywords each describing a use that is permitted
- for the issued certificate. The valid keywords are listed below.
-
-* `dns_names` - (Optional) List of DNS names for which a certificate is being requested.
-
-* `ip_addresses` - (Optional) List of IP addresses for which a certificate is being requested.
-
-* `early_renewal_hours` - (Optional) If set, the resource will consider the certificate to
- have expired the given number of hours before its actual expiry time. This can be useful
- to deploy an updated certificate in advance of the expiration of the current certificate.
- Note however that the old certificate remains valid until its true expiration time, since
- this resource does not (and cannot) support certificate revocation. Note also that this
- advance update can only be performed should the Terraform configuration be applied during the
- early renewal period.
-
-* `is_ca_certificate` - (Optional) Boolean controlling whether the CA flag will be set in the
- generated certificate. Defaults to `false`, meaning that the certificate does not represent
- a certificate authority.
-
-The `allowed_uses` list accepts the following keywords, combining the set of flags defined by
-both [Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.3) and
-[Extended Key Usage](https://tools.ietf.org/html/rfc5280#section-4.2.1.12) in
-[RFC5280](https://tools.ietf.org/html/rfc5280):
-
-* `digital_signature`
-* `content_commitment`
-* `key_encipherment`
-* `data_encipherment`
-* `key_agreement`
-* `cert_signing`
-* `crl_signing`
-* `encipher_only`
-* `decipher_only`
-* `any_extended`
-* `server_auth`
-* `client_auth`
-* `code_signing`
-* `email_protection`
-* `ipsec_end_system`
-* `ipsec_tunnel`
-* `ipsec_user`
-* `timestamping`
-* `ocsp_signing`
-* `microsoft_server_gated_crypto`
-* `netscape_server_gated_crypto`
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `cert_pem` - The certificate data in PEM format.
-* `validity_start_time` - The time after which the certificate is valid, as an
- [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
-* `validity_end_time` - The time until which the certificate is invalid, as an
- [RFC3339](https://tools.ietf.org/html/rfc3339) timestamp.
-
-## Automatic Renewal
-
-This resource considers its instances to have been deleted after either their validity
-periods ends or the early renewal period is reached. At this time, applying the
-Terraform configuration will cause a new certificate to be generated for the instance.
-
-Therefore in a development environment with frequent deployments it may be convenient
-to set a relatively-short expiration time and use early renewal to automatically provision
-a new certificate when the current one is about to expire.
-
-The creation of a new certificate may of course cause dependent resources to be updated
-or replaced, depending on the lifecycle rules applying to those resources.
diff --git a/website/source/docs/providers/triton/index.html.markdown b/website/source/docs/providers/triton/index.html.markdown
deleted file mode 100644
index 22b9a40e6..000000000
--- a/website/source/docs/providers/triton/index.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "triton"
-page_title: "Provider: Joyent Triton"
-sidebar_current: "docs-triton-index"
-description: |-
- Used to provision infrastructure in Joyent's Triton public or on-premise clouds.
----
-
-# Joyent Triton Provider
-
-The Triton provider is used to interact with resources in Joyent's Triton cloud. It is compatible with both public- and on-premise installations of Triton. The provider needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-provider "triton" {
- account = "AccountName"
- key_id = "25:d4:a9:fe:ef:e6:c0:bf:b4:4b:4b:d4:a8:8f:01:0f"
-
- # If using a private installation of Triton, specify the URL, otherwise
- # set the URL according to the region you wish to provision.
- url = "https://us-west-1.api.joyentcloud.com"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported in the `provider` block:
-
-* `account` - (Required) This is the name of the Triton account. It can also be provided via the `SDC_ACCOUNT` environment variable.
-* `key_material` - (Optional) This is the private key of an SSH key associated with the Triton account to be used. If this is not set, the private key corresponding to the fingerprint in `key_id` must be available via an SSH Agent.
-* `key_id` - (Required) This is the fingerprint of the public key matching the key specified in `key_path`. It can be obtained via the command `ssh-keygen -l -E md5 -f /path/to/key`
-* `url` - (Optional) This is the URL to the Triton API endpoint. It is required if using a private installation of Triton. The default is to use the Joyent public cloud us-west-1 endpoint. Valid public cloud endpoints include: `us-east-1`, `us-east-2`, `us-east-3`, `us-sw-1`, `us-west-1`, `eu-ams-1`
-* `insecure_skip_tls_verify` (Optional - defaults to false) This allows skipping TLS verification of the Triton endpoint. It is useful when connecting to a temporary Triton installation such as Cloud-On-A-Laptop which does not generally use a certificate signed by a trusted root CA.
diff --git a/website/source/docs/providers/triton/r/triton_fabric.html.markdown b/website/source/docs/providers/triton/r/triton_fabric.html.markdown
deleted file mode 100644
index cd783d87d..000000000
--- a/website/source/docs/providers/triton/r/triton_fabric.html.markdown
+++ /dev/null
@@ -1,79 +0,0 @@
----
-layout: "triton"
-page_title: "Triton: triton_fabric"
-sidebar_current: "docs-triton-resource-fabric"
-description: |-
- The `triton_fabric` resource represents an SSH fabric for a Triton account.
----
-
-# triton\_fabric
-
-The `triton_fabric` resource represents an fabric for a Triton account. The fabric is a logical set of interconnected switches.
-
-## Example Usages
-
-### Create a fabric
-
-```hcl
-resource "triton_fabric" "dmz" {
- vlan_id = 100
- name = "dmz"
- description = "DMZ Network"
- subnet = "10.60.1.0/24"
- provision_start_ip = "10.60.1.10"
- provision_end_ip = "10.60.1.240"
- gateway = "10.60.1.1"
- resolvers = ["8.8.8.8", "8.8.4.4"]
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (String, Required, Change forces new resource)
- Network name.
-
-* `description` - (String, Optional, Change forces new resource)
- Optional description of network.
-
-* `subnet` - (String, Required, Change forces new resource)
- CIDR formatted string describing network.
-
-* `provision_start_ip` - (String, Required, Change forces new resource)
- First IP on the network that can be assigned.
-
-* `provision_end_ip` - (String, Required, Change forces new resource)
- Last assignable IP on the network.
-
-* `gateway` - (String, Optional, Change forces new resource)
- Optional gateway IP.
-
-* `resolvers` - (List, Optional)
- Array of IP addresses for resolvers.
-
-* `routes` - (Map, Optional, Change forces new resource)
- Map of CIDR block to Gateway IP address.
-
-* `internet_nat` - (Bool, Optional, Change forces new resource)
- If a NAT zone is provisioned at Gateway IP address.
-
-* `vlan_id` - (Int, Required, Change forces new resource)
- VLAN id the network is on. Number between 0-4095 indicating VLAN ID.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `name` - (String) - Network name.
-* `public` - (Bool) - Whether or not this is an RFC1918 network.
-* `fabric` - (Bool) - Whether or not this network is on a fabric.
-* `description` - (String) - Optional description of network.
-* `subnet` - (String) - CIDR formatted string describing network.
-* `provision_start_ip` - (String) - First IP on the network that can be assigned.
-* `provision_end_ip` - (String) - Last assignable IP on the network.
-* `gateway` - (String) - Optional gateway IP.
-* `resolvers` - (List) - Array of IP addresses for resolvers.
-* `routes` - (Map) - Map of CIDR block to Gateway IP address.
-* `internet_nat` - (Bool) - If a NAT zone is provisioned at Gateway IP address.
-* `vlan_id` - (Int) - VLAN id the network is on. Number between 0-4095 indicating VLAN ID.
diff --git a/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown b/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown
deleted file mode 100644
index 0e09eb8ab..000000000
--- a/website/source/docs/providers/triton/r/triton_firewall_rule.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "triton"
-page_title: "Triton: triton_firewall_rule"
-sidebar_current: "docs-triton-resource-firewall-rule"
-description: |-
- The `triton_firewall_rule` resource represents a rule for the Triton cloud firewall.
----
-
-# triton\_firewall\_rule
-
-The `triton_firewall_rule` resource represents a rule for the Triton cloud firewall.
-
-## Example Usages
-
-### Allow web traffic on ports tcp/80 and tcp/443 to machines with the 'www' tag from any source
-
-```hcl
-resource "triton_firewall_rule" "www" {
- rule = "FROM any TO tag www ALLOW tcp (PORT 80 AND PORT 443)"
- enabled = true
-}
-```
-
-### Allow ssh traffic on port tcp/22 to all machines from known remote IPs
-
-```hcl
-resource "triton_firewall_rule" "22" {
- rule = "FROM IP (IP w.x.y.z OR IP w.x.y.z) TO all vms ALLOW tcp port 22"
- enabled = true
-}
-```
-
-### Block IMAP traffic on port tcp/143 to all machines
-
-```hcl
-resource "triton_firewall_rule" "imap" {
- rule = "FROM any TO all vms BLOCK tcp port 143"
- enabled = true
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `rule` - (string, Required)
- The firewall rule described using the Cloud API rule syntax defined at https://docs.joyent.com/public-cloud/network/firewall/cloud-firewall-rules-reference.
-
-* `enabled` - (boolean) Default: `false`
- Whether the rule should be effective.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - (string) - The identifier representing the firewall rule in Triton.
diff --git a/website/source/docs/providers/triton/r/triton_key.html.markdown b/website/source/docs/providers/triton/r/triton_key.html.markdown
deleted file mode 100644
index 83763880e..000000000
--- a/website/source/docs/providers/triton/r/triton_key.html.markdown
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "triton"
-page_title: "Triton: triton_key"
-sidebar_current: "docs-triton-resource-key"
-description: |-
- The `triton_key` resource represents an SSH key for a Triton account.
----
-
-# triton\_key
-
-The `triton_key` resource represents an SSH key for a Triton account.
-
-## Example Usages
-
-Create a key
-
-```hcl
-resource "triton_key" "example" {
- name = "Example Key"
- key = "${file("keys/id_rsa")}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (string, Change forces new resource)
- The name of the key. If this is left empty, the name is inferred from the comment in the SSH key material.
-
-* `key` - (string, Required, Change forces new resource)
- The SSH key material. In order to read this from a file, use the `file` interpolation.
diff --git a/website/source/docs/providers/triton/r/triton_machine.html.markdown b/website/source/docs/providers/triton/r/triton_machine.html.markdown
deleted file mode 100644
index af34fade2..000000000
--- a/website/source/docs/providers/triton/r/triton_machine.html.markdown
+++ /dev/null
@@ -1,100 +0,0 @@
----
-layout: "triton"
-page_title: "Triton: triton_machine"
-sidebar_current: "docs-triton-resource-machine"
-description: |-
- The `triton_machine` resource represents a virtual machine or infrastructure container running in Triton.
----
-
-# triton\_machine
-
-The `triton_machine` resource represents a virtual machine or infrastructure container running in Triton.
-
-## Example Usages
-
-### Run a SmartOS base-64 machine.
-
-```hcl
-resource "triton_machine" "test-smartos" {
- name = "test-smartos"
- package = "g3-standard-0.25-smartos"
- image = "842e6fa6-6e9b-11e5-8402-1b490459e334"
-
- tags = {
- hello = "world"
- }
-}
-```
-
-### Run an Ubuntu 14.04 LTS machine.
-
-```hcl
-resource "triton_machine" "test-ubuntu" {
- name = "test-ubuntu"
- package = "g4-general-4G"
- image = "1996a1d6-c0d9-11e6-8b80-4772e39dc920"
- firewall_enabled = true
- root_authorized_keys = "Example Key"
- user_script = "#!/bin/bash\necho 'testing user-script' >> /tmp/test.out\nhostname $IMAGENAME"
-
- tags = {
- purpose = "testing ubuntu"
- } ## tags
-} ## resource
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (string)
- The friendly name for the machine. Triton will generate a name if one is not specified.
-
-* `tags` - (map)
- A mapping of tags to apply to the machine.
-
-* `package` - (string, Required)
- The name of the package to use for provisioning.
-
-* `image` - (string, Required)
- The UUID of the image to provision.
-
-* `nic` - (list of NIC blocks, Optional)
- NICs associated with the machine. The fields allowed in a `NIC` block are defined below.
-
-* `firewall_enabled` - (boolean) Default: `false`
- Whether the cloud firewall should be enabled for this machine.
-
-* `root_authorized_keys` - (string)
- The public keys authorized for root access via SSH to the machine.
-
-* `user_data` - (string)
- Data to be copied to the machine on boot.
-
-* `user_script` - (string)
- The user script to run on boot (every boot on SmartMachines).
-
-* `administrator_pw` - (string)
- The initial password for the Administrator user. Only used for Windows virtual machines.
-
-* `cloud_config` - (string)
- Cloud-init configuration for Linux brand machines, used instead of `user_data`.
-
-The nested `nic` block supports the following:
-* `network` - (string, Optional)
- The network id to attach to the network interface. It will be hex, in the format: `xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx`.
-
-## Attribute Reference
-
-The following attributes are exported:
-
-* `id` - (string) - The identifier representing the firewall rule in Triton.
-* `type` - (string) - The type of the machine (`smartmachine` or `virtualmachine`).
-* `state` - (string) - The current state of the machine.
-* `dataset` - (string) - The dataset URN with which the machine was provisioned.
-* `memory` - (int) - The amount of memory the machine has (in Mb).
-* `disk` - (int) - The amount of disk the machine has (in Gb).
-* `ips` - (list of strings) - IP addresses of the machine.
-* `primaryip` - (string) - The primary (public) IP address for the machine.
-* `created` - (string) - The time at which the machine was created.
-* `updated` - (string) - The time at which the machine was last updated.
diff --git a/website/source/docs/providers/triton/r/triton_vlan.html.markdown b/website/source/docs/providers/triton/r/triton_vlan.html.markdown
deleted file mode 100644
index a7515f3f7..000000000
--- a/website/source/docs/providers/triton/r/triton_vlan.html.markdown
+++ /dev/null
@@ -1,36 +0,0 @@
----
-layout: "triton"
-page_title: "Triton: triton_vlan"
-sidebar_current: "docs-triton-resource-vlan"
-description: |-
- The `triton_vlan` resource represents an VLAN for a Triton account.
----
-
-# triton\_vlan
-
-The `triton_vlan` resource represents an Triton VLAN. A VLAN provides a low level way to segregate and subdivide the network. Traffic on one VLAN cannot, _on its own_, reach another VLAN.
-
-## Example Usages
-
-### Create a VLAN
-
-```hcl
-resource "triton_vlan" "dmz" {
- vlan_id = 100
- name = "dmz"
- description = "DMZ VLAN"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `vlan_id` - (int, Required, Change forces new resource)
- Number between 0-4095 indicating VLAN ID
-
-* `name` - (string, Required)
- Unique name to identify VLAN
-
-* `description` - (string, Optional)
- Description of the VLAN
diff --git a/website/source/docs/providers/ultradns/index.html.markdown b/website/source/docs/providers/ultradns/index.html.markdown
deleted file mode 100644
index 3d1a9b0f7..000000000
--- a/website/source/docs/providers/ultradns/index.html.markdown
+++ /dev/null
@@ -1,39 +0,0 @@
----
-layout: "ultradns"
-page_title: "Provider: UltraDNS"
-sidebar_current: "docs-ultradns-index"
-description: |-
- The UltraDNS provider is used to interact with the resources supported by UltraDNS. The provider needs to be configured with the proper credentials before it can be used.
----
-
-# UltraDNS Provider
-
-The UltraDNS provider is used to interact with the
-resources supported by UltraDNS. The provider needs to be configured
-with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-## Example Usage
-
-```hcl
-# Configure the UltraDNS provider
-provider "ultradns" {
- username = "${var.ultradns_username}"
- password = "${var.ultradns_password}"
- baseurl = "https://test-restapi.ultradns.com/"
-}
-
-# Create a record
-resource "ultradns_record" "www" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `username` - (Required) The UltraDNS username. It must be provided, but it can also be sourced from the `ULTRADNS_USERNAME` environment variable.
-* `password` - (Required) The password associated with the username. It must be provided, but it can also be sourced from the `ULTRADNS_PASSWORD` environment variable.
-* `baseurl` - (Required) The base url for the UltraDNS REST API, but it can also be sourced from the `ULTRADNS_BASEURL` environment variable.
diff --git a/website/source/docs/providers/ultradns/r/dirpool.html.markdown b/website/source/docs/providers/ultradns/r/dirpool.html.markdown
deleted file mode 100644
index c0377ad9d..000000000
--- a/website/source/docs/providers/ultradns/r/dirpool.html.markdown
+++ /dev/null
@@ -1,75 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_dirpool"
-sidebar_current: "docs-ultradns-resource-dirpool"
-description: |-
- Provides an UltraDNS Directional Controller pool resource.
----
-
-# ultradns\_dirpool
-
-Provides an UltraDNS Directional Controller pool resource.
-
-## Example Usage
-
-```hcl
-# Create a Directional Controller pool
-resource "ultradns_dirpool" "pool" {
- zone = "${var.ultradns_domain}"
- name = "terraform-dirpool"
- ttl = 300
- description = "Minimal DirPool"
-
- rdata {
- host = "192.168.0.10"
- }
-}
-```
-
-## Argument Reference
-
-See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-- `type` - (Required) The Record Type of the record
-* `description` - (Required) Description of the Traffic Controller pool. Valid values are strings less than 256 characters.
-* `rdata` - (Required) a list of Record Data blocks, one for each member in the pool. Record Data documented below.
-* `ttl` - (Optional) The TTL of the record. Default: `3600`.
-* `conflict_resolve` - (Optional) String. Valid: `"GEO"` or `"IP"`. Default: `"GEO"`.
-* `no_response` - (Optional) a single Record Data block, without any `host` attribute. Record Data documented below.
-
-Record Data blocks support the following:
-
-* `host` - (Required in `rdata`, absent in `no_response`) IPv4 address or CNAME for the pool member.
-- `all_non_configured` - (Optional) Boolean. Default: `false`.
-- `geo_info` - (Optional) a single Geo Info block. Geo Info documented below.
-- `ip_info` - (Optional) a single IP Info block. IP Info documented below.
-
-
-Geo Info blocks support the following:
-
-- `name` - (Optional) String.
-- `is_account_level` - (Optional) Boolean. Default: `false`.
-- `codes` - (Optional) Set of geo code strings. Shorthand codes are expanded.
-
-IP Info blocks support the following:
-
-- `name` - (Optional) String.
-- `is_account_level` - (Optional) Boolean. Default: `false`.
-- `ips` - (Optional) Set of IP blocks. IP Info documented below.
-
-IP blocks support the following:
-- `start` - (Optional) String. IP Address. Must be paired with `end`. Conflicts with `cidr` or `address`.
-- `end` - (Optional) String. IP Address. Must be paired with `start`.
-- `cidr` - (Optional) String.
-- `address` - (Optional) String. IP Address.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `hostname` - The FQDN of the record
diff --git a/website/source/docs/providers/ultradns/r/probe_http.html.markdown b/website/source/docs/providers/ultradns/r/probe_http.html.markdown
deleted file mode 100644
index 80e7847c7..000000000
--- a/website/source/docs/providers/ultradns/r/probe_http.html.markdown
+++ /dev/null
@@ -1,102 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_probe_http"
-sidebar_current: "docs-ultradns-resource-probe-http"
-description: |-
- Provides an UltraDNS HTTP probe
----
-
-# ultradns\_probe\_http
-
-Provides an UltraDNS HTTP probe
-
-## Example Usage
-
-```hcl
-resource "ultradns_probe_http" "probe" {
- zone = "${ultradns_tcpool.pool.zone}"
- name = "${ultradns_tcpool.pool.name}"
- pool_record = "10.2.1.1"
-
- agents = ["DALLAS", "AMSTERDAM"]
-
- interval = "ONE_MINUTE"
- threshold = 1
-
- http_probe {
- transaction {
- method = "POST"
- url = "http://localhost/index"
- transmitted_data = "{}"
- follow_redirects = true
-
- limit {
- name = "run"
-
- warning = 1
- critical = 2
- fail = 3
- }
-
- limit {
- name = "avgConnect"
-
- warning = 4
- critical = 5
- fail = 6
- }
-
- limit {
- name = "avgRun"
-
- warning = 7
- critical = 8
- fail = 9
- }
-
- limit {
- name = "connect"
-
- warning = 10
- critical = 11
- fail = 12
- }
- }
-
- total_limits {
- warning = 13
- critical = 14
- fail = 15
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain of the pool to probe.
-* `name` - (Required) The name of the pool to probe.
-- `pool_record` - (optional) IP address or domain. If provided, a record-level probe is created, otherwise a pool-level probe is created.
-- `agents` - (Required) List of locations that will be used for probing. One or more values must be specified. Valid values are `"NEW_YORK"`, `"PALO_ALTO"`, `"DALLAS"` & `"AMSTERDAM"`.
-- `threshold` - (Required) Number of agents that must agree for a probe state to be changed.
-- `http_probe` - (Required) an HTTP Probe block.
-- `interval` - (Optional) Length of time between probes in minutes. Valid values are `"HALF_MINUTE"`, `"ONE_MINUTE"`, `"TWO_MINUTES"`, `"FIVE_MINUTES"`, `"TEN_MINUTES"` & `"FIFTEEN_MINUTE"`. Default: `"FIVE_MINUTES"`.
-
-HTTP Probe block
-- `transaction` - (Optional) One or more Transaction blocks.
-- `total_limits` - (Optional) A Limit block, but with no `name` attribute.
-
-Transaction block
-- `method` - (Required) HTTP method. Valid values are`"GET"`, `"POST"`.
-- `url` - (Required) URL to probe.
-- `transmitted_data` - (Optional) Data to send to URL.
-- `follow_redirects` - (Optional) Whether to follow redirects.
-- `limit` - (Required) One or more Limit blocks. Only one limit block may exist for each name.
-
-Limit block
-- `name` - (Required) Kind of limit. Valid values are `"lossPercent"`, `"total"`, `"average"`, `"run"` & `"avgRun"`.
-- `warning` - (Optional) Amount to trigger a warning.
-- `critical` - (Optional) Amount to trigger a critical.
-- `fail` - (Optional) Amount to trigger a failure.
diff --git a/website/source/docs/providers/ultradns/r/probe_ping.html.markdown b/website/source/docs/providers/ultradns/r/probe_ping.html.markdown
deleted file mode 100644
index e680e255b..000000000
--- a/website/source/docs/providers/ultradns/r/probe_ping.html.markdown
+++ /dev/null
@@ -1,68 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_probe_ping"
-sidebar_current: "docs-ultradns-resource-probe-ping"
-description: |-
- Provides an UltraDNS Ping Probe
----
-
-# ultradns\_probe\_ping
-
-Provides an UltraDNS ping probe
-
-## Example Usage
-
-```hcl
-resource "ultradns_probe_ping" "probe" {
- zone = "${ultradns_tcpool.pool.zone}"
- name = "${ultradns_tcpool.pool.name}"
- pool_record = "10.3.0.1"
-
- agents = ["DALLAS", "AMSTERDAM"]
-
- interval = "ONE_MINUTE"
- threshold = 1
-
- ping_probe {
- packets = 15
- packet_size = 56
-
- limit {
- name = "lossPercent"
- warning = 1
- critical = 2
- fail = 3
- }
-
- limit {
- name = "total"
- warning = 2
- critical = 3
- fail = 4
- }
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain of the pool to probe.
-* `name` - (Required) The name of the pool to probe.
-- `pool_record` - (optional) IP address or domain. If provided, a record-level probe is created, otherwise a pool-level probe is created.
-- `agents` - (Required) List of locations that will be used for probing. One or more values must be specified. Valid values are `"NEW_YORK"`, `"PALO_ALTO"`, `"DALLAS"` & `"AMSTERDAM"`.
-- `threshold` - (Required) Number of agents that must agree for a probe state to be changed.
-- `ping_probe` - (Required) a Ping Probe block.
-- `interval` - (Optional) Length of time between probes in minutes. Valid values are `"HALF_MINUTE"`, `"ONE_MINUTE"`, `"TWO_MINUTES"`, `"FIVE_MINUTES"`, `"TEN_MINUTES"` & `"FIFTEEN_MINUTE"`. Default: `"FIVE_MINUTES"`.
-
-Ping Probe block
-- `packets` - (Optional) Number of ICMP packets to send. Default `3`.
-- `packet_size` - (Optional) Size of packets in bytes. Default `56`.
-- `limit` - (Required) One or more Limit blocks. Only one limit block may exist for each name.
-
-Limit block
-- `name` - (Required) Kind of limit. Valid values are `"lossPercent"`, `"total"`, `"average"`, `"run"` & `"avgRun"`.
-- `warning` - (Optional) Amount to trigger a warning.
-- `critical` - (Optional) Amount to trigger a critical.
-- `fail` - (Optional) Amount to trigger a failure.
diff --git a/website/source/docs/providers/ultradns/r/rdpool.html.markdown b/website/source/docs/providers/ultradns/r/rdpool.html.markdown
deleted file mode 100644
index 80f9a3e55..000000000
--- a/website/source/docs/providers/ultradns/r/rdpool.html.markdown
+++ /dev/null
@@ -1,46 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_rdpool"
-sidebar_current: "docs-ultradns-resource-rdpool"
-description: |-
- Provides an UltraDNS Resource Distribution pool resource.
----
-
-# ultradns\_rdpool
-
-Provides an UltraDNS Resource Distribution (RD) pool resource, which are
-used to define rules for returning multiple A or AAAA records for a given owner name. Ordering can be FIXED, RANDOM or ROUND_ROBIN.
-
-## Example Usage
-```
-# Create a Resource Distribution pool
-
-resource "ultradns_rdpool" "pool" {
- zone = "${var.ultradns_domain}"
- name = "terraform-rdpool"
- ttl = 600
- description = "Example RD Pool"
- order = "ROUND_ROBIN"
- rdata = [ "192.168.0.10", "192.168.0.11" ]
-}
-```
-
-## Argument Reference
-
-See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-* `rdata` - (Required) list ip addresses.
-* `order` - (Optional) Ordering rule, one of FIXED, RANDOM or ROUND_ROBIN. Default: 'ROUND_ROBIN'.
-* `description` - (Optional) Description of the Resource Distribution pool. Valid values are strings less than 256 characters.
-* `ttl` - (Optional) The TTL of the pool in seconds. Default: `3600`.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `hostname` - The FQDN of the record
diff --git a/website/source/docs/providers/ultradns/r/record.html.markdown b/website/source/docs/providers/ultradns/r/record.html.markdown
deleted file mode 100644
index ca8da9dfb..000000000
--- a/website/source/docs/providers/ultradns/r/record.html.markdown
+++ /dev/null
@@ -1,48 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_record"
-sidebar_current: "docs-ultradns-resource-record"
-description: |-
- Provides an UltraDNS record resource.
----
-
-# ultradns\_record
-
-Provides an UltraDNS record resource.
-
-## Example Usage
-
-```hcl
-# Add a record to the domain
-resource "ultradns_record" "foobar" {
- zone = "${var.ultradns_domain}"
- name = "terraform"
- rdata = ["192.168.0.11"]
- type = "A"
- ttl = 3600
-}
-```
-
-## Argument Reference
-
-See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-* `rdata` - (Required) An array containing the values of the record
-* `type` - (Required) The type of the record
-* `ttl` - (Optional) The TTL of the record
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `name` - The name of the record
-* `rdata` - An array containing the values of the record
-* `type` - The type of the record
-* `ttl` - The TTL of the record
-* `zone` - The domain of the record
-* `hostname` - The FQDN of the record
diff --git a/website/source/docs/providers/ultradns/r/tcpool.html.markdown b/website/source/docs/providers/ultradns/r/tcpool.html.markdown
deleted file mode 100644
index b9295cfe6..000000000
--- a/website/source/docs/providers/ultradns/r/tcpool.html.markdown
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "ultradns"
-page_title: "UltraDNS: ultradns_tcpool"
-sidebar_current: "docs-ultradns-resource-tcpool"
-description: |-
- Provides an UltraDNS Traffic Controller pool resource.
----
-
-# ultradns\_tcpool
-
-Provides an UltraDNS Traffic Controller pool resource.
-
-## Example Usage
-
-```hcl
-# Create a Traffic Controller pool
-resource "ultradns_tcpool" "pool" {
- zone = "${var.ultradns_domain}"
- name = "terraform-tcpool"
- ttl = 300
- description = "Minimal TC Pool"
-
- rdata {
- host = "192.168.0.10"
- }
-}
-```
-
-## Argument Reference
-
-See [related part of UltraDNS Docs](https://restapi.ultradns.com/v1/docs#post-rrset) for details about valid values.
-
-The following arguments are supported:
-
-* `zone` - (Required) The domain to add the record to
-* `name` - (Required) The name of the record
-* `rdata` - (Required) a list of rdata blocks, one for each member in the pool. Record Data documented below.
-* `description` - (Required) Description of the Traffic Controller pool. Valid values are strings less than 256 characters.
-* `ttl` - (Optional) The TTL of the record. Default: `3600`.
-* `run_probes` - (Optional) Boolean to run probes for this pool. Default: `true`.
-* `act_on_probes` - (Optional) Boolean to enable and disable pool records when probes are run. Default: `true`.
-* `max_to_lb` - (Optional) Determines the number of records to balance between. Valid values are integers `0` - `len(rdata)`. Default: `0`.
-* `backup_record_rdata` - (Optional) IPv4 address or CNAME for the backup record. Default: `nil`.
-* `backup_record_failover_delay` - (Optional) Time in minutes that Traffic Controller waits after detecting that the pool record has failed before activating primary records. Valid values are integers `0` - `30`. Default: `0`.
-
-Record Data blocks support the following:
-
-* `host` - (Required) IPv4 address or CNAME for the pool member.
-* `failover_delay` - (Optional) Time in minutes that Traffic Controller waits after detecting that the pool record has failed before activating secondary records. `0` will activate the secondary records immediately. Integer. Range: `0` - `30`. Default: `0`.
-* `priority` - (Optional) Indicates the serving preference for this pool record. Valid values are integers `1` or greater. Default: `1`.
-* `run_probes` - (Optional) Whether probes are run for this pool record. Boolean. Default: `true`.
-* `state` - (Optional) Current state of the pool record. String. Must be one of `"NORMAL"`, `"ACTIVE"`, or `"INACTIVE"`. Default: `"NORMAL"`.
-* `threshold` - (Optional) How many probes must agree before the record state is changed. Valid values are integers `1` - `len(probes)`. Default: `1`.
-* `weight` - (Optional) Traffic load to send to each server in the Traffic Controller pool. Valid values are integers `2` - `100`. Default: `2`
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The record ID
-* `hostname` - The FQDN of the record
diff --git a/website/source/docs/providers/vault/d/generic_secret.html.md b/website/source/docs/providers/vault/d/generic_secret.html.md
deleted file mode 100644
index c6ef7fecf..000000000
--- a/website/source/docs/providers/vault/d/generic_secret.html.md
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "vault"
-page_title: "Vault: vault_generic_secret data source"
-sidebar_current: "docs-vault-datasource-generic-secret"
-description: |-
- Reads arbitrary data from a given path in Vault
----
-
-# vault\_generic\_secret
-
-Reads arbitrary data from a given path in Vault.
-
-This resource is primarily intended to be used with
-[Vault's "generic" secret backend](https://www.vaultproject.io/docs/secrets/generic/index.html),
-but it is also compatible with any other Vault endpoint that supports
-the `vault read` command.
-
-~> **Important** All data retrieved from Vault will be
-written in cleartext to state file generated by Terraform, will appear in
-the console output when Terraform runs, and may be included in plan files
-if secrets are interpolated into any resource attributes.
-Protect these artifacts accordingly. See
-[the main provider documentation](../index.html)
-for more details.
-
-## Example Usage
-
-```hcl
-data "vault_generic_secret" "rundeck_auth" {
- path = "secret/rundeck_auth"
-}
-
-# Rundeck Provider, for example
-provider "rundeck" {
- url = "http://rundeck.example.com/"
- auth_token = "${data.vault_generic_secret.rundeck_auth.data["auth_token"]}"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `path` - (Required) The full logical path from which to request data.
-To read data from the "generic" secret backend mounted in Vault by
-default, this should be prefixed with `secret/`. Reading from other backends
-with this data source is possible; consult each backend's documentation
-to see which endpoints support the `GET` method.
-
-## Required Vault Capabilities
-
-Use of this resource requires the `read` capability on the given path.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `data_json` - A string containing the full data payload retrieved from
-Vault, serialized in JSON format.
-
-* `data` - A mapping whose keys are the top-level data keys returned from
-Vault and whose values are the corresponding values. This map can only
-represent string data, so any non-string values returned from Vault are
-serialized as JSON.
-
-* `lease_id` - The lease identifier assigned by Vault, if any.
-
-* `lease_duration` - The duration of the secret lease, in seconds relative
-to the time the data was requested. Once this time has passed any plan
-generated with this data may fail to apply.
-
-* `lease_start_time` - As a convenience, this records the current time
-on the computer where Terraform is running when the data is requested.
-This can be used to approximate the absolute time represented by
-`lease_duration`, though users must allow for any clock drift and response
-latency relative to to the Vault server.
-
-* `lease_renewable` - `true` if the lease can be renewed using Vault's
-`sys/renew/{lease-id}` endpoint. Terraform does not currently support lease
-renewal, and so it will request a new lease each time this data source is
-refreshed.
diff --git a/website/source/docs/providers/vault/index.html.markdown b/website/source/docs/providers/vault/index.html.markdown
deleted file mode 100644
index d58195d72..000000000
--- a/website/source/docs/providers/vault/index.html.markdown
+++ /dev/null
@@ -1,151 +0,0 @@
----
-layout: "vault"
-page_title: "Provider: Vault"
-sidebar_current: "docs-vault-index"
-description: |-
- The Vault provider allows Terraform to read from, write to, and configure Hashicorp Vault
----
-
-# Vault Provider
-
-The Vault provider allows Terraform to read from, write to, and configure
-[Hashicorp Vault](https://vaultproject.io/).
-
-~> **Important** Interacting with Vault from Terraform causes any secrets
-that you read and write to be persisted in both Terraform's state file
-*and* in any generated plan files. For any Terraform module that reads or
-writes Vault secrets, these files should be treated as sensitive and
-protected accordingly.
-
-This provider serves two pretty-distinct use-cases, which each have their
-own security trade-offs and caveats that are covered in the sections that
-follow. Consider these carefully before using this provider within your
-Terraform configuration.
-
-## Configuring and Populating Vault
-
-Terraform can be used by the Vault adminstrators to configure Vault and
-populate it with secrets. In this case, the state and any plans associated
-with the configuration must be stored and communicated with care, since they
-will contain in cleartext any values that were written into Vault.
-
-Currently Terraform has no mechanism to redact or protect secrets
-that are provided via configuration, so teams choosing to use Terraform
-for populating Vault secrets should pay careful attention to the notes
-on each resource's documentation page about how any secrets are persisted
-to the state and consider carefully whether such usage is compatible with
-their security policies.
-
-Except as otherwise noted, the resources that write secrets into Vault are
-designed such that they require only the *create* and *update* capabilities
-on the relevant resources, so that distinct tokens can be used for reading
-vs. writing and thus limit the exposure of a compromised token.
-
-## Using Vault credentials in Terraform configuration
-
-Most Terraform providers require credentials to interact with a third-party
-service that they wrap. This provider allows such credentials to be obtained
-from Vault, which means that operators or systems running Terraform need
-only access to a suitably-privileged Vault token in order to temporarily
-lease the credentials for other providers.
-
-Currently Terraform has no mechanism to redact or protect secrets that
-are returned via data sources, so secrets read via this provider will be
-persisted into the Terraform state, into any plan files, and in some cases
-in the console output produced while planning and applying. These artifacts
-must therefore all be protected accordingly.
-
-To reduce the exposure of such secrets, the provider requests a Vault token
-with a relatively-short TTL (20 minutes, by default) which in turn means
-that where possible Vault will revoke any issued credentials after that
-time, but in particular it is unable to retract any static secrets such as
-those stored in Vault's "generic" secret backend.
-
-The requested token TTL can be controlled by the `max_lease_ttl_seconds`
-provider argument described below. It is important to consider that Terraform
-reads from data sources during the `plan` phase and writes the result into
-the plan. Thus a subsequent `apply` will likely fail if it is run after the
-intermediate token has expired, due to the revocation of the secrets that
-are stored in the plan.
-
-Except as otherwise noted, the resources that read secrets from Vault
-are designed such that they require only the *read* capability on the relevant
-resources.
-
-## Provider Arguments
-
-The provider configuration block accepts the following arguments.
-In most cases it is recommended to set them via the indicated environment
-variables in order to keep credential information out of the configuration.
-
-* `address` - (Required) Origin URL of the Vault server. This is a URL
- with a scheme, a hostname and a port but with no path. May be set
- via the `VAULT_ADDR` environment variable.
-
-* `token` - (Required) Vault token that will be used by Terraform to
- authenticate. May be set via the `VAULT_TOKEN` environment variable.
- If none is otherwise supplied, Terraform will attempt to read it from
- `~/.vault-token` (where the vault command stores its current token).
- Terraform will issue itself a new token that is a child of the one given,
- with a short TTL to limit the exposure of any requested secrets.
-
-* `ca_cert_file` - (Optional) Path to a file on local disk that will be
- used to validate the certificate presented by the Vault server.
- May be set via the `VAULT_CACERT` environment variable.
-
-* `ca_cert_dir` - (Optional) Path to a directory on local disk that
- contains one or more certificate files that will be used to validate
- the certificate presented by the Vault server. May be set via the
- `VAULT_CAPATH` environment variable.
-
-* `client_auth` - (Optional) A configuration block, described below, that
- provides credentials used by Terraform to authenticate with the Vault
- server. At present there is little reason to set this, because Terraform
- does not support the TLS certificate authentication mechanism.
-
-* `skip_tls_verify` - (Optional) Set this to `true` to disable verification
- of the Vault server's TLS certificate. This is strongly discouraged except
- in prototype or development environments, since it exposes the possibility
- that Terraform can be tricked into writing secrets to a server controlled
- by an intruder. May be set via the `VAULT_SKIP_VERIFY` environment variable.
-
-* `max_lease_ttl_seconds` - (Optional) Used as the duration for the
- intermediate Vault token Terraform issues itself, which in turn limits
- the duration of secret leases issued by Vault. Defaults to 20 minutes
- and may be set via the `TERRAFORM_VAULT_MAX_TTL` environment variable.
- See the section above on *Using Vault credentials in Terraform configuration*
- for the implications of this setting.
-
-The `client_auth` configuration block accepts the following arguments:
-
-* `cert_file` - (Required) Path to a file on local disk that contains the
- PEM-encoded certificate to present to the server.
-
-* `key_file` - (Required) Path to a file on local disk that contains the
- PEM-encoded private key for which the authentication certificate was issued.
-
-## Example Usage
-
-```hcl
-provider "vault" {
- # It is strongly recommended to configure this provider through the
- # environment variables described above, so that each user can have
- # separate credentials set in the environment.
- #
- # This will default to using $VAULT_ADDR
- # But can be set explicitly
- # address = "https://vault.example.net:8200"
-}
-
-resource "vault_generic_secret" "example" {
- path = "secret/foo"
-
- data_json = < **Important** All data provided in the resource configuration will be
-written in cleartext to state and plan files generated by Terraform, and
-will appear in the console output when Terraform runs. Protect these
-artifacts accordingly. See
-[the main provider documentation](../index.html)
-for more details.
-
-## Example Usage
-
-```hcl
-resource "vault_generic_secret" "example" {
- path = "secret/foo"
-
- data_json = < **NOTE:** The VMware vCloud Director Provider currently represents _initial support_ and therefore may undergo significant changes as the community improves it.
-
-## Example Usage
-
-```hcl
-# Configure the VMware vCloud Director Provider
-provider "vcd" {
- user = "${var.vcd_user}"
- password = "${var.vcd_pass}"
- org = "${var.vcd_org}"
- url = "${var.vcd_url}"
- vdc = "${var.vcd_vdc}"
- maxRetryTimeout = "${var.vcd_maxRetryTimeout}"
- allow_unverified_ssl = "${var.vcd_allow_unverified_ssl}"
-}
-
-# Create a new network
-resource "vcd_network" "net" {
- # ...
-}
-```
-
-## Argument Reference
-
-The following arguments are used to configure the VMware vCloud Director Provider:
-
-* `user` - (Required) This is the username for vCloud Director API operations. Can also
- be specified with the `VCD_USER` environment variable.
-* `password` - (Required) This is the password for vCloud Director API operations. Can
- also be specified with the `VCD_PASSWORD` environment variable.
-* `org` - (Required) This is the vCloud Director Org on which to run API
- operations. Can also be specified with the `VCD_ORG` environment
- variable.
-* `url` - (Required) This is the URL for the vCloud Director API endpoint. e.g.
- https://server.domain.com/api. Can also be specified with the `VCD_URL` environment variable.
-* `vdc` - (Optional) This is the virtual datacenter within vCloud Director to run
- API operations against. If not set the plugin will select the first virtual
- datacenter available to your Org. Can also be specified with the `VCD_VDC` environment
- variable.
-* `maxRetryTimeout` - (Optional) This provides you with the ability to specify the maximum
- amount of time (in seconds) you are prepared to wait for interactions on resources managed
- by vCloud Director to be successful. If a resource action fails, the action will be retried
- (as long as it is still within the `maxRetryTimeout` value) to try and ensure success.
- Defaults to 60 seconds if not set.
- Can also be specified with the `VCD_MAX_RETRY_TIMEOUT` environment variable.
-* `allow_unverified_ssl` - (Optional) Boolean that can be set to true to
- disable SSL certificate verification. This should be used with care as it
- could allow an attacker to intercept your auth token. If omitted, default
- value is false. Can also be specified with the
- `VCD_ALLOW_UNVERIFIED_SSL` environment variable.
diff --git a/website/source/docs/providers/vcd/r/dnat.html.markdown b/website/source/docs/providers/vcd/r/dnat.html.markdown
deleted file mode 100644
index 6f18fb9af..000000000
--- a/website/source/docs/providers/vcd/r/dnat.html.markdown
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_dnat"
-sidebar_current: "docs-vcd-resource-dnat"
-description: |-
- Provides a vCloud Director DNAT resource. This can be used to create, modify, and delete destination NATs to map external IPs to a VM.
----
-
-# vcd\_dnat
-
-Provides a vCloud Director DNAT resource. This can be used to create, modify,
-and delete destination NATs to map an external IP/port to a VM.
-
-## Example Usage
-
-```hcl
-resource "vcd_dnat" "web" {
- edge_gateway = "Edge Gateway Name"
- external_ip = "78.101.10.20"
- port = 80
- internal_ip = "10.10.0.5"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `edge_gateway` - (Required) The name of the edge gateway on which to apply the DNAT
-* `external_ip` - (Required) One of the external IPs available on your Edge Gateway
-* `port` - (Required) The port number to map
-* `internal_ip` - (Required) The IP of the VM to map to
diff --git a/website/source/docs/providers/vcd/r/edgegateway_vpn.html.markdown b/website/source/docs/providers/vcd/r/edgegateway_vpn.html.markdown
deleted file mode 100644
index 2096016c5..000000000
--- a/website/source/docs/providers/vcd/r/edgegateway_vpn.html.markdown
+++ /dev/null
@@ -1,88 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_edgegateway_vpn"
-sidebar_current: "docs-vcd-resource-edgegateway-vpn"
-description: |-
- Provides a vCloud Director IPsec VPN. This can be used to create, modify, and delete VPN settings and rules.
----
-
-# vcd\_edgegateway\_vpn
-
-Provides a vCloud Director IPsec VPN. This can be used to create,
-modify, and delete VPN settings and rules.
-
-## Example Usage
-
-```
-resource "vcd_edgegateway_vpn" "vpn" {
- edge_gateway = "Internet_01(nti0000bi2_123-456-2)"
- name = "west-to-east"
- description = "Description"
- encryption_protocol = "AES256"
- mtu = 1400
- peer_id = "64.121.123.11"
- peer_ip_address = "64.121.123.11"
- local_id = "64.121.123.10"
- local_ip_address = "64.121.123.10"
- shared_secret = "***********************"
-
- peer_subnets {
- peer_subnet_name = "DMZ_WEST"
- peer_subnet_gateway = "10.0.10.1"
- peer_subnet_mask = "255.255.255.0"
- }
-
- peer_subnets {
- peer_subnet_name = "WEB_WEST"
- peer_subnet_gateway = "10.0.20.1"
- peer_subnet_mask = "255.255.255.0"
- }
-
- local_subnets {
- local_subnet_name = "DMZ_EAST"
- local_subnet_gateway = "10.0.1.1"
- local_subnet_mask = "255.255.255.0"
- }
-
- local_subnets {
- local_subnet_name = "WEB_EAST"
- local_subnet_gateway = "10.0.22.1"
- local_subnet_mask = "255.255.255.0"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `edge_gateway` - (Required) The name of the edge gateway on which to apply the Firewall Rules
-* `name` - (Required) The name of the VPN
-* `description` - (Required) A description for the VPN
-* `encryption_protocol` - (Required) - E.g. `AES256`
-* `local_ip_address` - (Required) - Local IP Address
-* `local_id` - (Required) - Local ID
-* `mtu` - (Required) - The MTU setting
-* `peer_ip_address` - (Required) - Peer IP Address
-* `peer_id` - (Required) - Peer ID
-* `shared_secret` - (Required) - Shared Secret
-* `local_subnets` - (Required) - List of Local Subnets see [Local Subnets](#localsubnets) below for details.
-* `peer_subnets` - (Required) - List of Peer Subnets see [Peer Subnets](#peersubnets) below for details.
-
-
-## Local Subnets
-
-Each Local Subnet supports the following attributes:
-
-* `local_subnet_name` - (Required) Name of the local subnet
-* `local_subnet_gateway` - (Required) Gateway of the local subnet
-* `local_subnet_mask` - (Required) Subnet mask of the local subnet
-
-
-## Peer Subnets
-
-Each Peer Subnet supports the following attributes:
-
-* `peer_subnet_name` - (Required) Name of the peer subnet
-* `peer_subnet_gateway` - (Required) Gateway of the peer subnet
-* `peer_subnet_mask` - (Required) Subnet mask of the peer subnet
\ No newline at end of file
diff --git a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown b/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
deleted file mode 100644
index a25a288fb..000000000
--- a/website/source/docs/providers/vcd/r/firewall_rules.html.markdown
+++ /dev/null
@@ -1,81 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_firewall_rules"
-sidebar_current: "docs-vcd-resource-firewall-rules"
-description: |-
- Provides a vCloud Director Firewall resource. This can be used to create, modify, and delete firewall settings and rules.
----
-
-# vcd\_firewall\_rules
-
-Provides a vCloud Director Firewall resource. This can be used to create,
-modify, and delete firewall settings and rules.
-
-## Example Usage
-
-```hcl
-resource "vcd_firewall_rules" "fw" {
- edge_gateway = "Edge Gateway Name"
- default_action = "drop"
-
- rule {
- description = "deny-ftp-out"
- policy = "deny"
- protocol = "tcp"
- destination_port = "21"
- destination_ip = "any"
- source_port = "any"
- source_ip = "10.10.0.0/24"
- }
-
- rule {
- description = "allow-outbound"
- policy = "allow"
- protocol = "any"
- destination_port = "any"
- destination_ip = "any"
- source_port = "any"
- source_ip = "10.10.0.0/24"
- }
-}
-
-resource "vcd_vapp" "web" {
- # ...
-}
-
-resource "vcd_firewall_rules" "fw-web" {
- edge_gateway = "Edge Gateway Name"
- default_action = "drop"
-
- rule {
- description = "allow-web"
- policy = "allow"
- protocol = "tcp"
- destination_port = "80"
- destination_ip = "${vcd_vapp.web.ip}"
- source_port = "any"
- source_ip = "any"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `edge_gateway` - (Required) The name of the edge gateway on which to apply the Firewall Rules
-* `default_action` - (Required) Either "allow" or "deny". Specifies what to do should none of the rules match
-* `rule` - (Optional) Configures a firewall rule; see [Rules](#rules) below for details.
-
-
-## Rules
-
-Each firewall rule supports the following attributes:
-
-* `description` - (Required) Description of the firewall rule
-* `policy` - (Required) Specifies what to do when this rule is matched. Either "allow" or "deny"
-* `protocol` - (Required) The protocol to match. One of "tcp", "udp", "icmp" or "any"
-* `destination_port` - (Required) The destination port to match. Either a port number or "any"
-* `destination_ip` - (Required) The destination IP to match. Either an IP address, IP range or "any"
-* `source_port` - (Required) The source port to match. Either a port number or "any"
-* `source_ip` - (Required) The source IP to match. Either an IP address, IP range or "any"
diff --git a/website/source/docs/providers/vcd/r/network.html.markdown b/website/source/docs/providers/vcd/r/network.html.markdown
deleted file mode 100644
index 85aafceaf..000000000
--- a/website/source/docs/providers/vcd/r/network.html.markdown
+++ /dev/null
@@ -1,56 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_network"
-sidebar_current: "docs-vcd-resource-network"
-description: |-
- Provides a vCloud Director VDC Network. This can be used to create, modify, and delete internal networks for vApps to connect.
----
-
-# vcd\_network
-
-Provides a vCloud Director VDC Network. This can be used to create,
-modify, and delete internal networks for vApps to connect.
-
-## Example Usage
-
-```hcl
-resource "vcd_network" "net" {
- name = "my-net"
- edge_gateway = "Edge Gateway Name"
- gateway = "10.10.0.1"
-
- dhcp_pool {
- start_address = "10.10.0.2"
- end_address = "10.10.0.100"
- }
-
- static_ip_pool {
- start_address = "10.10.0.152"
- end_address = "10.10.0.254"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the network
-* `edge_gateway` - (Required) The name of the edge gateway
-* `netmask` - (Optional) The netmask for the new network. Defaults to `255.255.255.0`
-* `gateway` (Required) The gateway for this network
-* `dns1` - (Optional) First DNS server to use. Defaults to `8.8.8.8`
-* `dns2` - (Optional) Second DNS server to use. Defaults to `8.8.4.4`
-* `dns_suffix` - (Optional) A FQDN for the virtual machines on this network
-* `dhcp_pool` - (Optional) A range of IPs to issue to virtual machines that don't
- have a static IP; see [IP Pools](#ip-pools) below for details.
-* `static_ip_pool` - (Optional) A range of IPs permitted to be used as static IPs for
- virtual machines; see [IP Pools](#ip-pools) below for details.
-
-
-## IP Pools
-
-Network interfaces support the following attributes:
-
-* `start_address` - (Required) The first address in the IP Range
-* `end_address` - (Required) The final address in the IP Range
diff --git a/website/source/docs/providers/vcd/r/snat.html.markdown b/website/source/docs/providers/vcd/r/snat.html.markdown
deleted file mode 100644
index 6c9903aca..000000000
--- a/website/source/docs/providers/vcd/r/snat.html.markdown
+++ /dev/null
@@ -1,30 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_snat"
-sidebar_current: "docs-vcd-resource-snat"
-description: |-
- Provides a vCloud Director SNAT resource. This can be used to create, modify, and delete source NATs to allow vApps to send external traffic.
----
-
-# vcd\_snat
-
-Provides a vCloud Director SNAT resource. This can be used to create, modify,
-and delete source NATs to allow vApps to send external traffic.
-
-## Example Usage
-
-```hcl
-resource "vcd_snat" "outbound" {
- edge_gateway = "Edge Gateway Name"
- external_ip = "78.101.10.20"
- internal_ip = "10.10.0.0/24"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `edge_gateway` - (Required) The name of the edge gateway on which to apply the SNAT
-* `external_ip` - (Required) One of the external IPs available on your Edge Gateway
-* `internal_ip` - (Required) The IP or IP Range of the VM(s) to map from
diff --git a/website/source/docs/providers/vcd/r/vapp.html.markdown b/website/source/docs/providers/vcd/r/vapp.html.markdown
deleted file mode 100644
index bed73fba9..000000000
--- a/website/source/docs/providers/vcd/r/vapp.html.markdown
+++ /dev/null
@@ -1,60 +0,0 @@
----
-layout: "vcd"
-page_title: "vCloudDirector: vcd_vapp"
-sidebar_current: "docs-vcd-resource-vapp"
-description: |-
- Provides a vCloud Director vApp resource. This can be used to create, modify, and delete vApps.
----
-
-# vcd\_vapp
-
-Provides a vCloud Director vApp resource. This can be used to create,
-modify, and delete vApps.
-
-## Example Usage
-
-```hcl
-resource "vcd_network" "net" {
- # ...
-}
-
-resource "vcd_vapp" "web" {
- name = "web"
- catalog_name = "Boxes"
- template_name = "lampstack-1.10.1-ubuntu-10.04"
- memory = 2048
- cpus = 1
-
- network_name = "${vcd_network.net.name}"
- network_href = "${vcd_network.net.href}"
- ip = "10.10.104.160"
-
- metadata {
- role = "web"
- env = "staging"
- version = "v1"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) A unique name for the vApp
-* `catalog_name` - (Required) The catalog name in which to find the given vApp Template
-* `template_name` - (Required) The name of the vApp Template to use
-* `memory` - (Optional) The amount of RAM (in MB) to allocate to the vApp
-* `cpus` - (Optional) The number of virtual CPUs to allocate to the vApp
-* `initscript` (Optional) A script to be run only on initial boot
-* `network_name` - (Required) Name of the network this vApp should join
-* `network_href` - (Optional) The vCloud Director generated href of the network this vApp
- should join. If empty it will use the network name and query vCloud Director to discover
- this
-* `ip` - (Optional) The IP to assign to this vApp. Must be an IP address or
- one of dhcp, allocated or none. If given the address must be within the
- `static_ip_pool` set for the network. If left blank, and the network has
- `dhcp_pool` set with at least one available IP then this will be set with
- DHCP.
-* `metadata` - (Optional) Key value map of metadata to assign to this vApp
-* `power_on` - (Optional) A boolean value stating if this vApp should be powered on. Default to `true`
diff --git a/website/source/docs/providers/vsphere/index.html.markdown b/website/source/docs/providers/vsphere/index.html.markdown
deleted file mode 100644
index 46fdaee55..000000000
--- a/website/source/docs/providers/vsphere/index.html.markdown
+++ /dev/null
@@ -1,200 +0,0 @@
----
-layout: "vsphere"
-page_title: "Provider: VMware vSphere"
-sidebar_current: "docs-vsphere-index"
-description: |-
- The VMware vSphere provider is used to interact with the resources supported by
- VMware vSphere. The provider needs to be configured with the proper credentials
- before it can be used.
----
-
-# VMware vSphere Provider
-
-The VMware vSphere provider is used to interact with the resources supported by
-VMware vSphere.
-The provider needs to be configured with the proper credentials before it can be used.
-
-Use the navigation to the left to read about the available resources.
-
-~> **NOTE:** The VMware vSphere Provider currently represents _initial support_
-and therefore may undergo significant changes as the community improves it. This
-provider at this time only supports IPv4 addresses on virtual machines.
-
-## Example Usage
-
-```hcl
-# Configure the VMware vSphere Provider
-provider "vsphere" {
- user = "${var.vsphere_user}"
- password = "${var.vsphere_password}"
- vsphere_server = "${var.vsphere_server}"
-
- # if you have a self-signed cert
- allow_unverified_ssl = true
-}
-
-# Create a folder
-resource "vsphere_folder" "frontend" {
- path = "frontend"
-}
-
-# Create a file
-resource "vsphere_file" "ubuntu_disk" {
- datastore = "local"
- source_file = "/home/ubuntu/my_disks/custom_ubuntu.vmdk"
- destination_file = "/my_path/disks/custom_ubuntu.vmdk"
-}
-
-# Create a disk image
-resource "vsphere_virtual_disk" "extraStorage" {
- size = 2
- vmdk_path = "myDisk.vmdk"
- datacenter = "Datacenter"
- datastore = "local"
-}
-
-# Create a virtual machine within the folder
-resource "vsphere_virtual_machine" "web" {
- name = "terraform-web"
- folder = "${vsphere_folder.frontend.path}"
- vcpu = 2
- memory = 4096
-
- network_interface {
- label = "VM Network"
- }
-
- disk {
- template = "centos-7"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are used to configure the VMware vSphere Provider:
-
-* `user` - (Required) This is the username for vSphere API operations. Can also
- be specified with the `VSPHERE_USER` environment variable.
-* `password` - (Required) This is the password for vSphere API operations. Can
- also be specified with the `VSPHERE_PASSWORD` environment variable.
-* `vsphere_server` - (Required) This is the vCenter server name for vSphere API
- operations. Can also be specified with the `VSPHERE_SERVER` environment
- variable.
-* `allow_unverified_ssl` - (Optional) Boolean that can be set to true to
- disable SSL certificate verification. This should be used with care as it
- could allow an attacker to intercept your auth token. If omitted, default
- value is `false`. Can also be specified with the `VSPHERE_ALLOW_UNVERIFIED_SSL`
- environment variable.
-* `client_debug` - (Optional) Boolean to set the govmomi API to log soap calls
- to disk. The log files are logged to `${HOME}/.govc`, the same path used by
- `govc`. Can also be specified with the `VSPHERE_CLIENT_DEBUG` environment
- variable.
-* `client_debug_path` - (Optional) Override the default log path. Can also
- be specified with the `VSPHERE_CLIENT_DEBUG_PATH` environment variable.
-* `client_debug_path_run` - (Optional) Client debug file path for a single run. Can also
- be specified with the `VSPHERE_CLIENT_DEBUG_PATH_RUN` environment variable.
-
-## Required Privileges
-
-In order to use the Terraform provider as a non-privileged user, a Role within
-vCenter must be assigned the following privileges:
-
-* Datastore
- - Allocate space
- - Browse datastore
- - Low level file operations
- - Remove file
- - Update virtual machine files
- - Update virtual machine metadata
-
-* Folder (all)
- - Create folder
- - Delete folder
- - Move folder
- - Rename folder
-
-* Network
- - Assign network
-
-* Resource
- - Apply recommendation
- - Assign virtual machine to resource pool
-
-* Virtual Machine
- - Configuration (all) - for now
- - Guest Operations (all) - for now
- - Interaction (all)
- - Inventory (all)
- - Provisioning (all)
-
-These settings were tested with [vSphere
-6.0](https://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.vsphere.security.doc%2FGUID-18071E9A-EED1-4968-8D51-E0B4F526FDA3.html)
-and [vSphere
-5.5](https://pubs.vmware.com/vsphere-55/index.jsp?topic=%2Fcom.vmware.vsphere.security.doc%2FGUID-18071E9A-EED1-4968-8D51-E0B4F526FDA3.html).
-For additional information on roles and permissions, please refer to official
-VMware documentation.
-
-## Virtual Machine Customization
-
-Guest Operating Systems can be configured using
-[customizations](https://pubs.vmware.com/vsphere-50/index.jsp#com.vmware.vsphere.vm_admin.doc_50/GUID-80F3F5B5-F795-45F1-B0FA-3709978113D5.html),
-in order to set properties such as domain and hostname. This mechanism
-is not compatible with all operating systems, however. A list of compatible
-operating systems can be found
-[here](http://partnerweb.vmware.com/programs/guestOS/guest-os-customization-matrix.pdf)
-
-If customization is attempted on an operating system which is not supported, Terraform will
-create the virtual machine, but fail with the following error message:
-
-```
-Customization of the guest operating system 'debian6_64Guest' is not
-supported in this configuration. Microsoft Vista (TM) and Linux guests with
-Logical Volume Manager are supported only for recent ESX host and VMware Tools
-versions. Refer to vCenter documentation for supported configurations.
-```
-
-In order to skip the customization step for unsupported operating systems, use
-the `skip_customization` argument on the virtual machine resource.
-
-## Acceptance Tests
-
-The VMware vSphere provider's acceptance tests require the above provider
-configuration fields to be set using the documented environment variables.
-
-In addition, the following environment variables are used in tests, and must be
-set to valid values for your VMware vSphere environment:
-
- * VSPHERE\_IPV4\_GATEWAY
- * VSPHERE\_IPV4\_ADDRESS
- * VSPHERE\_IPV6\_GATEWAY
- * VSPHERE\_IPV6\_ADDRESS
- * VSPHERE\_NETWORK\_LABEL
- * VSPHERE\_NETWORK\_LABEL\_DHCP
- * VSPHERE\_TEMPLATE
- * VSPHERE\_MAC\_ADDRESS
-
-The following environment variables depend on your vSphere environment:
-
- * VSPHERE\_DATACENTER
- * VSPHERE\_CLUSTER
- * VSPHERE\_RESOURCE\_POOL
- * VSPHERE\_DATASTORE
-
-The following additional environment variables are needed for running the
-"Mount ISO as CDROM media" acceptance tests.
-
- * VSPHERE\_CDROM\_DATASTORE
- * VSPHERE\_CDROM\_PATH
-
-
-These are used to set and verify attributes on the `vsphere_virtual_machine`
-resource in tests.
-
-Once all these variables are in place, the tests can be run like this:
-
-```
-make testacc TEST=./builtin/providers/vsphere
-```
-
-
diff --git a/website/source/docs/providers/vsphere/r/file.html.markdown b/website/source/docs/providers/vsphere/r/file.html.markdown
deleted file mode 100644
index 5ac5b55ab..000000000
--- a/website/source/docs/providers/vsphere/r/file.html.markdown
+++ /dev/null
@@ -1,53 +0,0 @@
----
-layout: "vsphere"
-page_title: "VMware vSphere: vsphere_file"
-sidebar_current: "docs-vsphere-resource-file"
-description: |-
- Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. vmdk disks) from the Terraform host machine to a remote vSphere or copy files within vSphere.
----
-
-# vsphere\_file
-
-Provides a VMware vSphere virtual machine file resource. This can be used to upload files (e.g. vmdk disks) from the Terraform host machine to a remote vSphere. The file resource can also be used to copy files within vSphere. Files can be copied between Datacenters and/or Datastores.
-
-Updates to file resources will handle moving a file to a new destination (datacenter and/or datastore and/or destination_file). If any source parameter (e.g. `source_datastore`, `source_datacenter` or `source_file`) are changed, this results in a new resource (new file uploaded or copied and old one being deleted).
-
-## Example Usages
-
-**Upload file to vSphere:**
-
-```hcl
-resource "vsphere_file" "ubuntu_disk_upload" {
- datacenter = "my_datacenter"
- datastore = "local"
- source_file = "/home/ubuntu/my_disks/custom_ubuntu.vmdk"
- destination_file = "/my_path/disks/custom_ubuntu.vmdk"
-}
-```
-
-**Copy file within vSphere:**
-
-```hcl
-resource "vsphere_file" "ubuntu_disk_copy" {
- source_datacenter = "my_datacenter"
- datacenter = "my_datacenter"
- source_datastore = "local"
- datastore = "local"
- source_file = "/my_path/disks/custom_ubuntu.vmdk"
- destination_file = "/my_path/custom_ubuntu_id.vmdk"
-}
-```
-
-## Argument Reference
-
-If `source_datacenter` and `source_datastore` are not provided, the file resource will upload the file from Terraform host. If either `source_datacenter` or `source_datastore` are provided, the file resource will copy from within specified locations in vSphere.
-
-The following arguments are supported:
-
-* `source_file` - (Required) The path to the file being uploaded from the Terraform host to vSphere or copied within vSphere.
-* `destination_file` - (Required) The path to where the file should be uploaded or copied to on vSphere.
-* `source_datacenter` - (Optional) The name of a Datacenter in which the file will be copied from.
-* `datacenter` - (Optional) The name of a Datacenter in which the file will be uploaded to.
-* `source_datastore` - (Optional) The name of the Datastore in which file will be copied from.
-* `datastore` - (Required) The name of the Datastore in which to upload the file to.
-* `create_directories` - (Optional) Create any directories missing from the `destination_file` path during a copy operation. *Note: directories are not deleted on a destroy operation.*
diff --git a/website/source/docs/providers/vsphere/r/folder.html.markdown b/website/source/docs/providers/vsphere/r/folder.html.markdown
deleted file mode 100644
index 47cb880b0..000000000
--- a/website/source/docs/providers/vsphere/r/folder.html.markdown
+++ /dev/null
@@ -1,28 +0,0 @@
----
-layout: "vsphere"
-page_title: "VMware vSphere: vsphere_folder"
-sidebar_current: "docs-vsphere-resource-folder"
-description: |-
- Provides a VMware vSphere virtual machine folder resource. This can be used to create and delete virtual machine folders.
----
-
-# vsphere\_folder
-
-Provides a VMware vSphere virtual machine folder resource. This can be used to create and delete virtual machine folders.
-
-## Example Usage
-
-```hcl
-resource "vsphere_folder" "web" {
- path = "terraform_web_folder"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `path` - (Required) The path of the folder to be created (relative to the datacenter root); should not begin or end with a "/"
-* `datacenter` - (Optional) The name of a Datacenter in which the folder will be created
-* `existing_path` - (Computed) The path of any parent folder segments which existed at the time this folder was created; on a
-destroy action, the (pre-) existing path is not removed.
diff --git a/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown b/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown
deleted file mode 100644
index deb5221e0..000000000
--- a/website/source/docs/providers/vsphere/r/virtual_disk.html.markdown
+++ /dev/null
@@ -1,35 +0,0 @@
----
-layout: "vsphere"
-page_title: "VMware vSphere: vsphere_virtual_disk"
-sidebar_current: "docs-vsphere-resource-virtual-disk"
-description: |-
- Provides a VMware virtual disk resource. This can be used to create and delete virtual disks.
----
-
-# vsphere\_virtual\_disk
-
-Provides a VMware virtual disk resource. This can be used to create and delete virtual disks.
-
-## Example Usage
-
-```hcl
-resource "vsphere_virtual_disk" "myDisk" {
- size = 2
- vmdk_path = "myDisk.vmdk"
- datacenter = "Datacenter"
- datastore = "local"
- type = "thin"
- adapter_type = "lsiLogic"
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `size` - (Required) Size of the disk (in GB).
-* `vmdk_path` - (Required) The path, including filename, of the virtual disk to be created. This should end with '.vmdk'.
-* `type` - (Optional) 'eagerZeroedThick' (the default), 'lazy', or 'thin' are supported options.
-* `adapter_type` - (Optional) set adapter type, 'ide' (the default), 'lsiLogic', or 'busLogic' are supported options.
-* `datacenter` - (Optional) The name of a Datacenter in which to create the disk.
-* `datastore` - (Required) The name of the Datastore in which to create the disk.
diff --git a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown b/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
deleted file mode 100644
index 1a0434cfe..000000000
--- a/website/source/docs/providers/vsphere/r/virtual_machine.html.markdown
+++ /dev/null
@@ -1,149 +0,0 @@
----
-layout: "vsphere"
-page_title: "VMware vSphere: vsphere_virtual_machine"
-sidebar_current: "docs-vsphere-resource-virtual-machine"
-description: |-
- Provides a VMware vSphere virtual machine resource. This can be used to create, modify, and delete virtual machines.
----
-
-# vsphere\_virtual\_machine
-
-Provides a VMware vSphere virtual machine resource. This can be used to create,
-modify, and delete virtual machines.
-
-## Example Usage
-
-```hcl
-resource "vsphere_virtual_machine" "web" {
- name = "terraform-web"
- vcpu = 2
- memory = 4096
-
- network_interface {
- label = "VM Network"
- }
-
- disk {
- template = "centos-7"
- }
-}
-```
-
-## Example Usage VMware Cluster
-
-```hcl
-resource "vsphere_virtual_machine" "lb" {
- name = "lb01"
- folder = "Loadbalancers"
- vcpu = 2
- memory = 4096
- domain = "MYDOMAIN"
- datacenter = "EAST"
- cluster = "Production Cluster"
- resource_pool = "Production Cluster/Resources/Production Servers"
-
- network_interface {
- label = "10_20_30_VMNet"
- ipv4_address = "10.20.30.40"
- ipv4_prefix_length = "24"
- ipv4_gateway = "10.20.30.254"
- }
-
- disk {
- datastore = "EAST/VMFS01-EAST"
- template = "Templates/Centos7"
- }
-}
-```
-
-## Argument Reference
-
-The following arguments are supported:
-
-* `name` - (Required) The virtual machine name (cannot contain underscores and must be less than 15 characters)
-* `vcpu` - (Required) The number of virtual CPUs to allocate to the virtual machine
-* `memory` - (Required) The amount of RAM (in MB) to allocate to the virtual machine
-* `memory_reservation` - (Optional) The amount of RAM (in MB) to reserve physical memory resource; defaults to 0 (means not to reserve)
-* `datacenter` - (Optional) The name of a Datacenter in which to launch the virtual machine
-* `cluster` - (Optional) Name of a Cluster in which to launch the virtual machine
-* `resource_pool` (Optional) The name of a Resource Pool in which to launch the virtual machine. Requires full path (see cluster example).
-* `gateway` - __Deprecated, please use `network_interface.ipv4_gateway` instead__.
-* `domain` - (Optional) A FQDN for the virtual machine; defaults to "vsphere.local"
-* `time_zone` - (Optional) The [Linux](https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/timezone.html) or [Windows](https://msdn.microsoft.com/en-us/library/ms912391.aspx) time zone to set on the virtual machine. Defaults to "Etc/UTC"
-* `dns_suffixes` - (Optional) List of name resolution suffixes for the virtual network adapter
-* `dns_servers` - (Optional) List of DNS servers for the virtual network adapter; defaults to 8.8.8.8, 8.8.4.4
-* `network_interface` - (Required) Configures virtual network interfaces; see [Network Interfaces](#network-interfaces) below for details.
-* `disk` - (Required) Configures virtual disks; see [Disks](#disks) below for details
-* `detach_unknown_disks_on_delete` - (Optional) will detach disks not managed by this resource on delete (avoids deletion of disks attached after resource creation outside of Terraform scope).
-* `cdrom` - (Optional) Configures a CDROM device and mounts an image as its media; see [CDROM](#cdrom) below for more details.
-* `windows_opt_config` - (Optional) Extra options for clones of Windows machines.
-* `linked_clone` - (Optional) Specifies if the new machine is a [linked clone](https://www.vmware.com/support/ws5/doc/ws_clone_overview.html#wp1036396) of another machine or not.
-* `enable_disk_uuid` - (Optional) This option causes the vm to mount disks by uuid on the guest OS.
-* `custom_configuration_parameters` - (Optional) Map of values that is set as virtual machine custom configurations.
-* `skip_customization` - (Optional) skip virtual machine customization (useful if OS is not in the guest OS support matrix of VMware like "other3xLinux64Guest").
-
-The `network_interface` block supports:
-
-* `label` - (Required) Label to assign to this network interface
-* `ipv4_address` - (Optional) Static IPv4 to assign to this network interface. Interface will use DHCP if this is left blank.
-* `ipv4_prefix_length` - (Optional) prefix length to use when statically assigning an IPv4 address.
-* `ipv4_gateway` - (Optional) IPv4 gateway IP address to use.
-* `ipv6_address` - (Optional) Static IPv6 to assign to this network interface. Interface will use DHCPv6 if this is left blank.
-* `ipv6_prefix_length` - (Optional) prefix length to use when statically assigning an IPv6.
-* `ipv6_gateway` - (Optional) IPv6 gateway IP address to use.
-* `mac_address` - (Optional) Manual MAC address to assign to this network interface. Will be generated by VMware if not set. ([VMware KB: Setting a static MAC address for a virtual NIC (219)](https://kb.vmware.com/selfservice/microsites/search.do?cmd=displayKC&externalId=219))
-
-The following arguments are maintained for backwards compatibility and may be
-removed in a future version:
-
-* `ip_address` - __Deprecated, please use `ipv4_address` instead__.
-* `subnet_mask` - __Deprecated, please use `ipv4_prefix_length` instead__.
-
-The `windows_opt_config` block supports:
-
-* `product_key` - (Optional) Serial number for new installation of Windows. This serial number is ignored if the original guest operating system was installed using a volume-licensed CD.
-* `admin_password` - (Optional) The password for the new `administrator` account. Omit for passwordless admin (using `""` does not work).
-* `domain` - (Optional) Domain that the new machine will be placed into. If `domain`, `domain_user`, and `domain_user_password` are not all set, all three will be ignored.
-* `domain_user` - (Optional) User that is a member of the specified domain.
-* `domain_user_password` - (Optional) Password for domain user, in plain text.
-
-
-## Disks
-
-The `disk` block supports:
-
-* `template` - (Required if size and bootable_vmdk_path not provided) Template for this disk.
-* `datastore` - (Optional) Datastore for this disk
-* `size` - (Required if template and bootable_vmdks_path not provided) Size of this disk (in GB).
-* `name` - (Required if size is provided when creating a new disk) This "name" is used for the disk file name in vSphere, when the new disk is created.
-* `iops` - (Optional) Number of virtual iops to allocate for this disk.
-* `type` - (Optional) 'eager_zeroed' (the default), 'lazy', or 'thin' are supported options.
-* `vmdk` - (Required if template and size not provided) Path to a vmdk in a vSphere datastore.
-* `bootable` - (Optional) Set to 'true' if a vmdk was given and it should attempt to boot after creation.
-* `controller_type` - (Optional) Controller type to attach the disk to. 'scsi' (the default), or 'ide' are supported options.
-* `keep_on_remove` - (Optional) Set to 'true' to not delete a disk on removal.
-
-
-## CDROM
-
-The `cdrom` block supports:
-
-* `datastore` - (Required) The name of the datastore where the disk image is stored.
-* `path` - (Required) The absolute path to the image within the datastore.
-
-## Attributes Reference
-
-The following attributes are exported:
-
-* `id` - The instance ID.
-* `uuid` - The instance UUID.
-* `moid` - The instance MOID (Managed Object Reference ID).
-* `name` - See Argument Reference above.
-* `vcpu` - See Argument Reference above.
-* `memory` - See Argument Reference above.
-* `datacenter` - See Argument Reference above.
-* `network_interface/label` - See Argument Reference above.
-* `network_interface/ipv4_address` - See Argument Reference above.
-* `network_interface/ipv4_prefix_length` - See Argument Reference above.
-* `network_interface/ipv6_address` - Assigned static IPv6 address.
-* `network_interface/ipv6_prefix_length` - Prefix length of assigned static IPv6 address.
diff --git a/website/source/downloads.html.erb b/website/source/downloads.html.erb
deleted file mode 100644
index 86e2900af..000000000
--- a/website/source/downloads.html.erb
+++ /dev/null
@@ -1,61 +0,0 @@
----
-layout: "downloads"
-page_title: "Download Terraform"
-sidebar_current: "downloads-terraform"
-description: |-
- Download Terraform
----
-
-
Download Terraform
-
-
-
-
-
- Below are the available downloads for the latest version of Terraform
- (<%= latest_version %>). Please download the proper package for your
- operating system and architecture.
-
-
diff --git a/website/source/favicon.ico b/website/source/favicon.ico
deleted file mode 100644
index fd3d1442e..000000000
Binary files a/website/source/favicon.ico and /dev/null differ
diff --git a/website/source/index.html.erb b/website/source/index.html.erb
deleted file mode 100644
index 97425655e..000000000
--- a/website/source/index.html.erb
+++ /dev/null
@@ -1,316 +0,0 @@
----
-description: |-
- Terraform enables you to safely and predictably create, change, and improve
- production infrastructure. It is an open source tool that codifies APIs into
- declarative configuration files that can be shared amongst team members,
- treated as code, edited, reviewed, and versioned.
----
-
-
-
- Terraform enables you to safely and predictably create, change, and
- improve production infrastructure. It is an open source tool that
- codifies APIs into declarative configuration files that can be shared amongst team members,
- treated as code, edited, reviewed, and versioned.
-
- Understand how a minor change could have potential cascading effects
- across an infrastructure before executing that change. Terraform
- builds a dependency graph from the configurations, and walks this
- graph to generate plans, refresh state, and more.
-
-
-
-
Separation of Plan & Apply
-
- Separating plans and applies reduces mistakes and uncertainty at
- scale. Plans show operators what would happen, applies execute
- changes.
-
-
-
-
One Safe Workflow
-
- Use Terraform to create resources across all major infrastructure
- providers (AWS, GCP, Azure, OpenStack, VMware, and more).
-
-
-
-
-
-
-
-
-
-
- Create
-
Reproducible Infrastructure
-
Terraform lets operators easily use the same
- configurations in multiple places to reduce mistakes and save time.
Use the same Terraform configuration to provision identical staging,
- QA, and production environments.
-
-
-
Shareable modules
-
Common Terraform configurations can be packaged as modules and used
- across teams and organizations.
-
-
-
Combine multiple providers consistently
-
Terraform allows you to effortlessly combine high-level system
- providers. Launch a server from one cloud provider, add a DNS entry
- with its IP with a different provider. Built-in dependency resolution
- means things happen in the right order.
- Use attributes from other resources to create an infrastructure
- composed of resources across multiple providers.
-
-
-
-
-
-
-
-
-
-
-
- resource "digitalocean_droplet""web" {
- name = "tf-web"
- size = "512mb"
- image = "centos-5-8-x32"
- region = "sfo1"
- }
-
- resource "dnsimple_record""hello" {
- domain = "example.com"
- name = "test"
- value = "${digitalocean_droplet.web.ipv4_address}"
- type = "A"
- }
-
-
-
-
-
-
-
-
Fast, Simplified Interaction
-
- Simple and intuitive configuration makes even the most complicated
- services approachable: no more web consoles, loading bars, or
- confusing CLI clients.
-
- Collaborative Infrastructure Automation for organizations. Collaborate
- on Terraform configurations, validate changes, and automate
- provisioning across providers.
-
- NOTE: The Azure Service Management Provider is no longer
- being actively developed by HashiCorp employees. It continues to be
- supported by the community. We recommend using the Azure Resource Manager
- based Microsoft Azure Provider
- instead if possible.
-
-
- <%= yield %>
-<% end %>
diff --git a/website/source/layouts/azurerm.erb b/website/source/layouts/azurerm.erb
deleted file mode 100644
index 95c5b3979..000000000
--- a/website/source/layouts/azurerm.erb
+++ /dev/null
@@ -1,366 +0,0 @@
-
-<% wrap_layout :inner do %>
- <% content_for :sidebar do %>
-
- <% end %>
-
- <%= yield %>
-<% end %>
diff --git a/website/source/microsoft-tile.xml.builder b/website/source/microsoft-tile.xml.builder
deleted file mode 100644
index 3935c4a78..000000000
--- a/website/source/microsoft-tile.xml.builder
+++ /dev/null
@@ -1,14 +0,0 @@
----
-layout: false
-noindex: true
----
-
-xml.instruct!
-xml.browserconfig do
- xml.msapplication do
- xml.tile do
- xml.square150x150logo src: image_path("favicons/mstile-150x150.png")
- xml.TileColor "#603CBA"
- end
- end
-end
diff --git a/website/source/robots.txt b/website/source/robots.txt
deleted file mode 100644
index 190c6ce04..000000000
--- a/website/source/robots.txt
+++ /dev/null
@@ -1,8 +0,0 @@
----
-layout: false
-noindex: true
----
-
-User-agent: *
-Disallow: /404
-Disallow: /500
diff --git a/website/source/security.html.erb b/website/source/security.html.erb
deleted file mode 100644
index 5171d4833..000000000
--- a/website/source/security.html.erb
+++ /dev/null
@@ -1,32 +0,0 @@
----
-layout: "inner"
-page_title: "Security"
-description: |-
- Terraform takes security very seriously. Please responsibly disclose any security vulnerabilities found and we'll handle it quickly.
----
-
-
Terraform Security
-
-
- We understand that many users place a high level of trust in HashiCorp
- and the tools we build. We apply best practices and focus on security to
- make sure we can maintain the trust of the community.
-
-
-
- We deeply appreciate any effort to disclose vulnerabilities responsibly.
-
-
-
- If you would like to report a vulnerability, please see the HashiCorp security
- page, which has the proper email to communicate with as well as our
- PGP key. Please do not create an GitHub issue for security
- concerns.
-