Merge branch 'terraform' into hmrc
Commit: bda4ef7e7c
@@ -28,11 +28,13 @@ IMPROVEMENTS:
* provider/aws: Add `kinesis_endpoint` for configuring Kinesis [GH-3255]
* provider/aws: Add a computed ARN for S3 Buckets [GH-3685]
* provider/aws: Add configuration to enable copying RDS tags to final snapshot [GH-3529]
* provider/aws: RDS Cluster additions (`backup_retention_period`, `preferred_backup_window`, `preferred_maintenance_window`) [GH-3757]
* provider/openstack: Use IPv4 as the default IP version for subnets [GH-3091]
* provider/aws: Apply security group after restoring db_instance from snapshot [GH-3513]
* provider/aws: Make the AutoScalingGroup name optional [GH-3710]
* provider/openstack: Add "delete on termination" boot-from-volume option [GH-3232]
* provider/digitalocean: Make user_data force a new droplet [GH-3740]
* provider/vsphere: Do not add network interfaces by default [GH-3652]

BUG FIXES:

@@ -44,8 +46,10 @@ BUG FIXES:
* provider/aws: Allow cluster name, not only ARN, for `aws_ecs_service` [GH-3668]
* provider/aws: Ignore associations that no longer exist on route table destroy [GH-3615]
* provider/aws: Fix policy encoding issue with SNS Topics [GH-3700]
* provider/aws: Tolerate ElastiCache clusters being deleted outside Terraform [GH-3767]
* provider/azure: various bugfixes [GH-3695]
* provider/digitalocean: Fix issue preventing SSH fingerprints from working [GH-3633]
* provider/digitalocean: Fix a potential 404 on DigitalOcean Droplet state refresh [GH-3768]
* provider/openstack: Fix several issues causing unresolvable diffs [GH-3440]
* provider/openstack: Safely delete security groups [GH-3696]
* provider/openstack: Ignore order of security_groups in instance [GH-3651]

@@ -241,6 +241,12 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})

  res, err := conn.DescribeCacheClusters(req)
  if err != nil {
    if eccErr, ok := err.(awserr.Error); ok && eccErr.Code() == "CacheClusterNotFound" {
      log.Printf("[WARN] ElastiCache Cluster (%s) not found", d.Id())
      d.SetId("")
      return nil
    }

    return err
  }

@@ -4,6 +4,7 @@ import (
  "fmt"
  "log"
  "regexp"
  "strings"
  "time"

  "github.com/aws/aws-sdk-go/aws"

@@ -122,6 +123,38 @@ func resourceAwsRDSCluster() *schema.Resource {
        Elem: &schema.Schema{Type: schema.TypeString},
        Set: schema.HashString,
      },

      "preferred_backup_window": &schema.Schema{
        Type: schema.TypeString,
        Optional: true,
        Computed: true,
      },

      "preferred_maintenance_window": &schema.Schema{
        Type: schema.TypeString,
        Optional: true,
        Computed: true,
        StateFunc: func(val interface{}) string {
          if val == nil {
            return ""
          }
          return strings.ToLower(val.(string))
        },
      },

      "backup_retention_period": &schema.Schema{
        Type: schema.TypeInt,
        Optional: true,
        Default: 1,
        ValidateFunc: func(v interface{}, k string) (ws []string, es []error) {
          value := v.(int)
          if value > 35 {
            es = append(es, fmt.Errorf(
              "backup retention period cannot be more than 35 days"))
          }
          return
        },
      },
    },
  }
}

@@ -156,6 +189,18 @@ func resourceAwsRDSClusterCreate(d *schema.ResourceData, meta interface{}) error
    createOpts.AvailabilityZones = expandStringList(attr.List())
  }

  if v, ok := d.GetOk("backup_retention_period"); ok {
    createOpts.BackupRetentionPeriod = aws.Int64(int64(v.(int)))
  }

  if v, ok := d.GetOk("preferred_backup_window"); ok {
    createOpts.PreferredBackupWindow = aws.String(v.(string))
  }

  if v, ok := d.GetOk("preferred_maintenance_window"); ok {
    createOpts.PreferredMaintenanceWindow = aws.String(v.(string))
  }

  log.Printf("[DEBUG] RDS Cluster create options: %s", createOpts)
  resp, err := conn.CreateDBCluster(createOpts)
  if err != nil {

@@ -223,6 +268,9 @@ func resourceAwsRDSClusterRead(d *schema.ResourceData, meta interface{}) error {
  d.Set("engine", dbc.Engine)
  d.Set("master_username", dbc.MasterUsername)
  d.Set("port", dbc.Port)
  d.Set("backup_retention_period", dbc.BackupRetentionPeriod)
  d.Set("preferred_backup_window", dbc.PreferredBackupWindow)
  d.Set("preferred_maintenance_window", dbc.PreferredMaintenanceWindow)

  var vpcg []string
  for _, g := range dbc.VpcSecurityGroups {

@@ -263,6 +311,18 @@ func resourceAwsRDSClusterUpdate(d *schema.ResourceData, meta interface{}) error
    }
  }

  if d.HasChange("preferred_backup_window") {
    req.PreferredBackupWindow = aws.String(d.Get("preferred_backup_window").(string))
  }

  if d.HasChange("preferred_maintenance_window") {
    req.PreferredMaintenanceWindow = aws.String(d.Get("preferred_maintenance_window").(string))
  }

  if d.HasChange("backup_retention_period") {
    req.BackupRetentionPeriod = aws.Int64(int64(d.Get("backup_retention_period").(int)))
  }

  _, err := conn.ModifyDBCluster(req)
  if err != nil {
    return fmt.Errorf("[WARN] Error modifying RDS Cluster (%s): %s", d.Id(), err)

@@ -17,13 +17,16 @@ import (
func TestAccAWSRDSCluster_basic(t *testing.T) {
  var v rds.DBCluster

  ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
  config := fmt.Sprintf(testAccAWSClusterConfig, ri)

  resource.Test(t, resource.TestCase{
    PreCheck: func() { testAccPreCheck(t) },
    Providers: testAccProviders,
    CheckDestroy: testAccCheckAWSClusterDestroy,
    Steps: []resource.TestStep{
      resource.TestStep{
        Config: testAccAWSClusterConfig,
        Config: config,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
        ),

@@ -32,6 +35,47 @@ func TestAccAWSRDSCluster_basic(t *testing.T) {
  })
}

func TestAccAWSRDSCluster_backupsUpdate(t *testing.T) {
  var v rds.DBCluster

  ri := rand.New(rand.NewSource(time.Now().UnixNano())).Int()
  preConfig := fmt.Sprintf(testAccAWSClusterConfig_backups, ri)
  postConfig := fmt.Sprintf(testAccAWSClusterConfig_backupsUpdate, ri)

  resource.Test(t, resource.TestCase{
    PreCheck: func() { testAccPreCheck(t) },
    Providers: testAccProviders,
    CheckDestroy: testAccCheckAWSClusterDestroy,
    Steps: []resource.TestStep{
      resource.TestStep{
        Config: preConfig,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "preferred_backup_window", "07:00-09:00"),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "backup_retention_period", "5"),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "preferred_maintenance_window", "tue:04:00-tue:04:30"),
        ),
      },

      resource.TestStep{
        Config: postConfig,
        Check: resource.ComposeTestCheckFunc(
          testAccCheckAWSClusterExists("aws_rds_cluster.default", &v),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "preferred_backup_window", "03:00-09:00"),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "backup_retention_period", "10"),
          resource.TestCheckResourceAttr(
            "aws_rds_cluster.default", "preferred_maintenance_window", "wed:01:00-wed:01:30"),
        ),
      },
    },
  })
}

func testAccCheckAWSClusterDestroy(s *terraform.State) error {
  for _, rs := range s.RootModule().Resources {
    if rs.Type != "aws_rds_cluster" {

@@ -97,12 +141,36 @@ func testAccCheckAWSClusterExists(n string, v *rds.DBCluster) resource.TestCheck
  }
}

// Add some random to the name, to avoid collision
var testAccAWSClusterConfig = fmt.Sprintf(`
var testAccAWSClusterConfig = `
resource "aws_rds_cluster" "default" {
  cluster_identifier = "tf-aurora-cluster-%d"
  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
  database_name = "mydb"
  master_username = "foo"
  master_password = "mustbeeightcharaters"
}`, rand.New(rand.NewSource(time.Now().UnixNano())).Int())
}`

var testAccAWSClusterConfig_backups = `
resource "aws_rds_cluster" "default" {
  cluster_identifier = "tf-aurora-cluster-%d"
  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
  database_name = "mydb"
  master_username = "foo"
  master_password = "mustbeeightcharaters"
  backup_retention_period = 5
  preferred_backup_window = "07:00-09:00"
  preferred_maintenance_window = "tue:04:00-tue:04:30"
}`

var testAccAWSClusterConfig_backupsUpdate = `
resource "aws_rds_cluster" "default" {
  cluster_identifier = "tf-aurora-cluster-%d"
  availability_zones = ["us-west-2a","us-west-2b","us-west-2c"]
  database_name = "mydb"
  master_username = "foo"
  master_password = "mustbeeightcharaters"
  backup_retention_period = 10
  preferred_backup_window = "03:00-09:00"
  preferred_maintenance_window = "wed:01:00-wed:01:30"
  apply_immediately = true
}`

@@ -186,10 +186,11 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) e
  }

  // Retrieve the droplet properties for updating the state
  droplet, _, err := client.Droplets.Get(id)
  droplet, resp, err := client.Droplets.Get(id)
  if err != nil {
    // check if the droplet no longer exists.
    if err.Error() == "Error retrieving droplet: API Error: 404 Not Found" {
    if resp.StatusCode == 404 {
      log.Printf("[WARN] DigitalOcean Droplet (%s) not found", d.Id())
      d.SetId("")
      return nil
    }

@@ -1000,7 +1000,6 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
    NumCPUs: vm.vcpu,
    NumCoresPerSocket: 1,
    MemoryMB: vm.memoryMb,
    DeviceChange: networkDevices,
  }
  log.Printf("[DEBUG] virtual machine config spec: %v", configSpec)

@@ -1024,11 +1023,10 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {

  // make vm clone spec
  cloneSpec := types.VirtualMachineCloneSpec{
    Location: relocateSpec,
    Template: false,
    Config: &configSpec,
    Customization: &customSpec,
    PowerOn: true,
    Location: relocateSpec,
    Template: false,
    Config: &configSpec,
    PowerOn: false,
  }
  log.Printf("[DEBUG] clone spec: %v", cloneSpec)

@@ -1048,6 +1046,43 @@ func (vm *virtualMachine) deployVirtualMachine(c *govmomi.Client) error {
  }
  log.Printf("[DEBUG] new vm: %v", newVM)

  devices, err := newVM.Device(context.TODO())
  if err != nil {
    log.Printf("[DEBUG] Template devices can't be found")
    return err
  }

  for _, dvc := range devices {
    // Issue 3559/3560: Delete all ethernet devices to add the correct ones later
    if devices.Type(dvc) == "ethernet" {
      err := newVM.RemoveDevice(context.TODO(), dvc)
      if err != nil {
        return err
      }
    }
  }
  // Add Network devices
  for _, dvc := range networkDevices {
    err := newVM.AddDevice(
      context.TODO(), dvc.GetVirtualDeviceConfigSpec().Device)
    if err != nil {
      return err
    }
  }

  taskb, err := newVM.Customize(context.TODO(), customSpec)
  if err != nil {
    return err
  }

  _, err = taskb.WaitForResult(context.TODO(), nil)
  if err != nil {
    return err
  }
  log.Printf("[DEBUG]VM customization finished")

  newVM.PowerOn(context.TODO())

  ip, err := newVM.WaitForIP(context.TODO())
  if err != nil {
    return err

@@ -15,9 +15,21 @@ import (

func TestAccVSphereVirtualMachine_basic(t *testing.T) {
  var vm virtualMachine
  datacenter := os.Getenv("VSPHERE_DATACENTER")
  cluster := os.Getenv("VSPHERE_CLUSTER")
  datastore := os.Getenv("VSPHERE_DATASTORE")
  var locationOpt string
  var datastoreOpt string

  if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
    locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
    locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
    locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
    datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
  }
  template := os.Getenv("VSPHERE_TEMPLATE")
  gateway := os.Getenv("VSPHERE_NETWORK_GATEWAY")
  label := os.Getenv("VSPHERE_NETWORK_LABEL")

@@ -31,28 +43,23 @@ func TestAccVSphereVirtualMachine_basic(t *testing.T) {
      resource.TestStep{
        Config: fmt.Sprintf(
          testAccCheckVSphereVirtualMachineConfig_basic,
          datacenter,
          cluster,
          locationOpt,
          gateway,
          label,
          ip_address,
          datastore,
          datastoreOpt,
          template,
        ),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.foo", &vm),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "name", "terraform-test"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "datacenter", datacenter),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "vcpu", "2"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "memory", "4096"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "disk.#", "2"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "disk.0.datastore", datastore),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.foo", "disk.0.template", template),
          resource.TestCheckResourceAttr(

@@ -67,12 +74,23 @@ func TestAccVSphereVirtualMachine_basic(t *testing.T) {

func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
  var vm virtualMachine
  datacenter := os.Getenv("VSPHERE_DATACENTER")
  cluster := os.Getenv("VSPHERE_CLUSTER")
  datastore := os.Getenv("VSPHERE_DATASTORE")
  var locationOpt string
  var datastoreOpt string

  if v := os.Getenv("VSPHERE_DATACENTER"); v != "" {
    locationOpt += fmt.Sprintf(" datacenter = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_CLUSTER"); v != "" {
    locationOpt += fmt.Sprintf(" cluster = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_RESOURCE_POOL"); v != "" {
    locationOpt += fmt.Sprintf(" resource_pool = \"%s\"\n", v)
  }
  if v := os.Getenv("VSPHERE_DATASTORE"); v != "" {
    datastoreOpt = fmt.Sprintf(" datastore = \"%s\"\n", v)
  }
  template := os.Getenv("VSPHERE_TEMPLATE")
  label := os.Getenv("VSPHERE_NETWORK_LABEL_DHCP")
  password := os.Getenv("VSPHERE_VM_PASSWORD")

  resource.Test(t, resource.TestCase{
    PreCheck: func() { testAccPreCheck(t) },

@@ -82,27 +100,21 @@ func TestAccVSphereVirtualMachine_dhcp(t *testing.T) {
      resource.TestStep{
        Config: fmt.Sprintf(
          testAccCheckVSphereVirtualMachineConfig_dhcp,
          datacenter,
          cluster,
          locationOpt,
          label,
          datastore,
          datastoreOpt,
          template,
          password,
        ),
        Check: resource.ComposeTestCheckFunc(
          testAccCheckVSphereVirtualMachineExists("vsphere_virtual_machine.bar", &vm),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "name", "terraform-test"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "datacenter", datacenter),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "vcpu", "2"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "memory", "4096"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "disk.#", "1"),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "disk.0.datastore", datastore),
          resource.TestCheckResourceAttr(
            "vsphere_virtual_machine.bar", "disk.0.template", template),
          resource.TestCheckResourceAttr(

@@ -168,20 +180,6 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou
    }

    _, err = object.NewSearchIndex(client.Client).FindChild(context.TODO(), dcFolders.VmFolder, rs.Primary.Attributes["name"])
    /*
      vmRef, err := client.SearchIndex().FindChild(dcFolders.VmFolder, rs.Primary.Attributes["name"])
      if err != nil {
        return fmt.Errorf("error %s", err)
      }

      found := govmomi.NewVirtualMachine(client, vmRef.Reference())
      fmt.Printf("%v", found)

      if found.Name != rs.Primary.ID {
        return fmt.Errorf("Instance not found")
      }
      *instance = *found
    */

    *vm = virtualMachine{
      name: rs.Primary.ID,

@@ -194,8 +192,7 @@ func testAccCheckVSphereVirtualMachineExists(n string, vm *virtualMachine) resou
const testAccCheckVSphereVirtualMachineConfig_basic = `
resource "vsphere_virtual_machine" "foo" {
  name = "terraform-test"
  datacenter = "%s"
  cluster = "%s"
%s
  vcpu = 2
  memory = 4096
  gateway = "%s"

@@ -205,7 +202,7 @@ resource "vsphere_virtual_machine" "foo" {
    subnet_mask = "255.255.255.0"
  }
  disk {
    datastore = "%s"
%s
    template = "%s"
    iops = 500
  }

@@ -219,22 +216,15 @@ resource "vsphere_virtual_machine" "foo" {
const testAccCheckVSphereVirtualMachineConfig_dhcp = `
resource "vsphere_virtual_machine" "bar" {
  name = "terraform-test"
  datacenter = "%s"
  cluster = "%s"
%s
  vcpu = 2
  memory = 4096
  network_interface {
    label = "%s"
  }
  disk {
    datastore = "%s"
%s
    template = "%s"
  }

  connection {
    host = "${self.network_interface.0.ip_address}"
    user = "root"
    password = "%s"
  }
}
`

@@ -24,6 +24,8 @@ resource "aws_rds_cluster" "default" {
  database_name = "mydb"
  master_username = "foo"
  master_password = "bar"
  backup_retention_period = 5
  preferred_backup_window = "07:00-09:00"
}
```

@@ -52,6 +54,9 @@ string.
  instances in the DB cluster can be created in
* `backup_retention_period` - (Optional) The number of days to retain backups for. Default: 1
* `preferred_backup_window` - (Optional) The daily time range during which automated backups are created, if automated backups are enabled using the BackupRetentionPeriod parameter.
  Default: a 30-minute window selected at random from an 8-hour block of time per region, e.g. 04:00-09:00
* `preferred_maintenance_window` - (Optional) The weekly time range during which system maintenance can occur, in UTC, e.g. wed:04:00-wed:04:30
* `port` - (Optional) The port on which the DB accepts connections
* `vpc_security_group_ids` - (Optional) List of VPC security groups to associate
  with the Cluster

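Taken together, the three backup-related arguments can be set like this. This is a minimal sketch, not text from the page itself: the time ranges reuse values from this commit's acceptance tests, and `apply_immediately` is shown only because those tests set it when changing the windows on an existing cluster.

```
resource "aws_rds_cluster" "default" {
  # ... other cluster arguments as in the example above ...

  backup_retention_period      = 5
  preferred_backup_window      = "07:00-09:00"
  preferred_maintenance_window = "wed:04:00-wed:04:30"

  # used by the acceptance tests when modifying the windows in place
  apply_immediately = true
}
```
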
@@ -70,7 +75,8 @@ The following attributes are exported:
* `allocated_storage` - The amount of allocated storage
* `availability_zones` - The availability zone of the instance
* `backup_retention_period` - The backup retention period
* `backup_window` - The backup window
* `preferred_backup_window` - The backup window
* `preferred_maintenance_window` - The maintenance window
* `endpoint` - The primary, writeable connection endpoint
* `engine` - The database engine
* `engine_version` - The database engine version

@@ -80,6 +86,7 @@ The following attributes are exported:
* `status` - The RDS instance status
* `username` - The master username for the database
* `storage_encrypted` - Specifies whether the DB instance is encrypted
* `preferred_backup_window` - The daily time range during which the backups happen

[1]: http://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Overview.Replication.html

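These exported attributes can be interpolated elsewhere in a configuration. A small sketch, using a hypothetical output name together with the `endpoint` attribute listed above:

```
output "rds_cluster_endpoint" {
  # hypothetical output; "default" matches the resource name in the example above
  value = "${aws_rds_cluster.default.endpoint}"
}
```
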
@@ -1,27 +1,28 @@
---
layout: "vsphere"
page_title: "Provider: vSphere"
page_title: "Provider: VMware vSphere"
sidebar_current: "docs-vsphere-index"
description: |-
  The vSphere provider is used to interact with the resources supported by
  vSphere. The provider needs to be configured with the proper credentials before
  it can be used.
  The VMware vSphere provider is used to interact with the resources supported by
  VMware vSphere. The provider needs to be configured with the proper credentials
  before it can be used.
---

# vSphere Provider
# VMware vSphere Provider

The vSphere provider is used to interact with the resources supported by vSphere.
The VMware vSphere provider is used to interact with the resources supported by
VMware vSphere.
The provider needs to be configured with the proper credentials before it can be used.

Use the navigation to the left to read about the available resources.

~> **NOTE:** The vSphere Provider currently represents _initial support_ and
therefore may undergo significant changes as the community improves it.
~> **NOTE:** The VMware vSphere Provider currently represents _initial support_
and therefore may undergo significant changes as the community improves it.

## Example Usage

```
# Configure the vSphere Provider
# Configure the VMware vSphere Provider
provider "vsphere" {
  user = "${var.vsphere_user}"
  password = "${var.vsphere_password}"

@@ -47,7 +48,7 @@ resource "vsphere_virtual_machine" "web" {

## Argument Reference

The following arguments are used to configure the vSphere Provider:
The following arguments are used to configure the VMware vSphere Provider:

* `user` - (Required) This is the username for vSphere API operations. Can also
  be specified with the `VSPHERE_USER` environment variable.

@@ -59,20 +60,24 @@ The following arguments are used to configure the vSphere Provider:

## Acceptance Tests

The vSphere provider's acceptance tests require the above provider
The VMware vSphere provider's acceptance tests require the above provider
configuration fields to be set using the documented environment variables.

In addition, the following environment variables are used in tests, and must be set to valid values for your vSphere environment:
In addition, the following environment variables are used in tests, and must be set to valid values for your VMware vSphere environment:

* VSPHERE\_CLUSTER
* VSPHERE\_DATACENTER
* VSPHERE\_DATASTORE
* VSPHERE\_NETWORK\_GATEWAY
* VSPHERE\_NETWORK\_IP\_ADDRESS
* VSPHERE\_NETWORK\_LABEL
* VSPHERE\_NETWORK\_LABEL\_DHCP
* VSPHERE\_TEMPLATE
* VSPHERE\_VM\_PASSWORD

The following environment variables depend on your vSphere environment:

* VSPHERE\_DATACENTER
* VSPHERE\_CLUSTER
* VSPHERE\_RESOURCE\_POOL
* VSPHERE\_DATASTORE

These are used to set and verify attributes on the `vsphere_virtual_machine`
resource in tests.

@@ -1,14 +1,14 @@
---
layout: "vsphere"
page_title: "vSphere: vsphere_virtual_machine"
page_title: "VMware vSphere: vsphere_virtual_machine"
sidebar_current: "docs-vsphere-resource-virtual-machine"
description: |-
  Provides a vSphere virtual machine resource. This can be used to create, modify, and delete virtual machines.
  Provides a VMware vSphere virtual machine resource. This can be used to create, modify, and delete virtual machines.
---

# vsphere\_virtual\_machine

Provides a vSphere virtual machine resource. This can be used to create,
Provides a VMware vSphere virtual machine resource. This can be used to create,
modify, and delete virtual machines.

## Example Usage

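The body of the example usage falls outside this hunk. A minimal configuration, sketched from the acceptance-test config earlier in this diff rather than copied from the page, would look roughly like the following; the label, datastore, and template names are illustrative placeholders.

```
resource "vsphere_virtual_machine" "web" {
  name   = "terraform-test"
  vcpu   = 2
  memory = 4096

  network_interface {
    label = "VM Network"        # placeholder network label
  }

  disk {
    datastore = "datastore1"    # placeholder datastore
    template  = "centos-7-base" # placeholder template name
  }
}
```
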
@@ -197,10 +197,9 @@
      <a href="/docs/providers/vcd/index.html">vCloud Director</a>
    </li>

    <li<%= sidebar_current("docs-providers-vsphere") %>>
      <a href="/docs/providers/vsphere/index.html">vSphere</a>
    </li>

    <li<%= sidebar_current("docs-providers-vsphere") %>>
      <a href="/docs/providers/vsphere/index.html">VMware vSphere</a>
    </li>
  </ul>
</li>

@@ -7,7 +7,7 @@
    </li>

    <li<%= sidebar_current("docs-vsphere-index") %>>
      <a href="/docs/providers/vsphere/index.html">vSphere Provider</a>
      <a href="/docs/providers/vsphere/index.html">VMware vSphere Provider</a>
    </li>

    <li<%= sidebar_current(/^docs-vsphere-resource/) %>>