remove various typos
commit 198e1a5186
parent b91df72371
@@ -532,7 +532,7 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error {
 })

 if err != nil {
-log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn)
+log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
 }

 var dt []*rds.Tag
@@ -716,7 +716,7 @@ func resourceAwsDbInstanceUpdate(d *schema.ResourceData, meta interface{}) error
 }
 }

-// seperate request to promote a database
+// separate request to promote a database
 if d.HasChange("replicate_source_db") {
 if d.Get("replicate_source_db").(string) == "" {
 // promote
@@ -253,7 +253,7 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{})
 })

 if err != nil {
-log.Printf("[DEBUG] Error retreiving tags for ARN: %s", arn)
+log.Printf("[DEBUG] Error retrieving tags for ARN: %s", arn)
 }

 var et []*elasticache.Tag
@@ -30,7 +30,7 @@ func resourceAwsIamUser() *schema.Resource {
 name. The only way to locate a user by UniqueID is to list them
 all and that would make this provider unnecessarilly complex
 and inefficient. Still, there are other reasons one might want
-the UniqueID, so we can make it availible.
+the UniqueID, so we can make it available.
 */
 "unique_id": &schema.Schema{
 Type: schema.TypeString,
@@ -295,7 +295,7 @@ func testIngressRuleLength(networkAcl *ec2.NetworkAcl, length int) resource.Test
 }
 }
 // There is always a default rule (ALL Traffic ... DENY)
-// so we have to increase the lenght by 1
+// so we have to increase the length by 1
 if len(ingressEntries) != length+1 {
 return fmt.Errorf("Invalid number of ingress entries found; count = %d", len(ingressEntries))
 }
@@ -96,7 +96,7 @@ func resourceAwsNetworkInterfaceCreate(d *schema.ResourceData, meta interface{})

 private_ips := d.Get("private_ips").(*schema.Set).List()
 if len(private_ips) != 0 {
-request.PrivateIpAddresses = expandPrivateIPAddesses(private_ips)
+request.PrivateIpAddresses = expandPrivateIPAddresses(private_ips)
 }

 log.Printf("[DEBUG] Creating network interface")
@@ -133,7 +133,7 @@ func resourceAwsNetworkInterfaceRead(d *schema.ResourceData, meta interface{}) e

 eni := describeResp.NetworkInterfaces[0]
 d.Set("subnet_id", eni.SubnetId)
-d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddesses(eni.PrivateIpAddresses))
+d.Set("private_ips", flattenNetworkInterfacesPrivateIPAddresses(eni.PrivateIpAddresses))
 d.Set("security_groups", flattenGroupIdentifiers(eni.Groups))
 d.Set("source_dest_check", eni.SourceDestCheck)
@@ -378,7 +378,7 @@ func expandStringList(configured []interface{}) []*string {
 }

 //Flattens an array of private ip addresses into a []string, where the elements returned are the IP strings e.g. "192.168.0.0"
-func flattenNetworkInterfacesPrivateIPAddesses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
+func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
 ips := make([]string, 0, len(dtos))
 for _, v := range dtos {
 ip := *v.PrivateIpAddress
@@ -398,7 +398,7 @@ func flattenGroupIdentifiers(dtos []*ec2.GroupIdentifier) []string {
 }

 //Expands an array of IPs into a ec2 Private IP Address Spec
-func expandPrivateIPAddesses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
+func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
 dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips))
 for i, v := range ips {
 new_private_ip := &ec2.PrivateIpAddressSpecification{
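The two hunks above only rename these helpers; their bodies are cut off after the first few lines. For orientation, here is a minimal sketch of what the complete functions plausibly look like, reconstructed from the visible fragments and assuming the `ec2` and `aws` helper packages already imported in this file; the `append` calls, the type assertion, and the `Primary` flag are assumptions, not part of the diff.

```go
// Sketch only: loop bodies are reconstructed, not taken from the diff.
func flattenNetworkInterfacesPrivateIPAddresses(dtos []*ec2.NetworkInterfacePrivateIpAddress) []string {
	ips := make([]string, 0, len(dtos))
	for _, v := range dtos {
		ip := *v.PrivateIpAddress
		ips = append(ips, ip) // collect each dereferenced address
	}
	return ips
}

func expandPrivateIPAddresses(ips []interface{}) []*ec2.PrivateIpAddressSpecification {
	dtos := make([]*ec2.PrivateIpAddressSpecification, 0, len(ips))
	for i, v := range ips {
		new_private_ip := &ec2.PrivateIpAddressSpecification{
			PrivateIpAddress: aws.String(v.(string)),
			Primary:          aws.Bool(i == 0), // assumption: treat the first entry as primary
		}
		dtos = append(dtos, new_private_ip)
	}
	return dtos
}
```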
@@ -189,7 +189,7 @@ func TestExpandIPPerms_NegOneProtocol(t *testing.T) {
 }

 // Now test the error case. This *should* error when either from_port
-// or to_port is not zero, but protocal is "-1".
+// or to_port is not zero, but protocol is "-1".
 errorCase := []interface{}{
 map[string]interface{}{
 "protocol": "-1",
@@ -497,13 +497,13 @@ func TestexpandInstanceString(t *testing.T) {
 }
 }

-func TestflattenNetworkInterfacesPrivateIPAddesses(t *testing.T) {
+func TestflattenNetworkInterfacesPrivateIPAddresses(t *testing.T) {
 expanded := []*ec2.NetworkInterfacePrivateIpAddress{
 &ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.1")},
 &ec2.NetworkInterfacePrivateIpAddress{PrivateIpAddress: aws.String("192.168.0.2")},
 }

-result := flattenNetworkInterfacesPrivateIPAddesses(expanded)
+result := flattenNetworkInterfacesPrivateIPAddresses(expanded)

 if result == nil {
 t.Fatal("result was nil")
@@ -543,7 +543,7 @@ func TestflattenGroupIdentifiers(t *testing.T) {
 }
 }

-func TestexpandPrivateIPAddesses(t *testing.T) {
+func TestexpandPrivateIPAddresses(t *testing.T) {

 ip1 := "192.168.0.1"
 ip2 := "192.168.0.2"
@@ -552,7 +552,7 @@ func TestexpandPrivateIPAddesses(t *testing.T) {
 ip2,
 }

-result := expandPrivateIPAddesses(flattened)
+result := expandPrivateIPAddresses(flattened)

 if len(result) != 2 {
 t.Fatalf("expected result had %d elements, but got %d", 2, len(result))
@@ -165,7 +165,7 @@ func resourceAzureDnsServerUpdate(d *schema.ResourceData, meta interface{}) erro
 }

 // resourceAzureDnsServerExists does all the necessary API calls to
-// check if the DNS server definition alredy exists on Azure.
+// check if the DNS server definition already exists on Azure.
 func resourceAzureDnsServerExists(d *schema.ResourceData, meta interface{}) (bool, error) {
 azureClient := meta.(*Client)
 vnetClient := azureClient.vnetClient
@@ -52,7 +52,7 @@ func TestAccAzureInstance_separateHostedService(t *testing.T) {
 CheckDestroy: testAccCheckAzureInstanceDestroyed(testAccHostedServiceName),
 Steps: []resource.TestStep{
 resource.TestStep{
-Config: testAccAzureInstance_seperateHostedService,
+Config: testAccAzureInstance_separateHostedService,
 Check: resource.ComposeTestCheckFunc(
 testAccCheckAzureInstanceExists(
 "azure_instance.foo", testAccHostedServiceName, &dpmt),
@@ -384,7 +384,7 @@ resource "azure_instance" "foo" {
 }
 }`, instanceName, testAccStorageServiceName)

-var testAccAzureInstance_seperateHostedService = fmt.Sprintf(`
+var testAccAzureInstance_separateHostedService = fmt.Sprintf(`
 resource "azure_hosted_service" "foo" {
 name = "%s"
 location = "West US"
@@ -137,7 +137,7 @@ func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interfac
 sqlClient := azureClient.sqlClient
 serverName := d.Get("database_server_name").(string)

-// changes to the name must occur seperately from changes to the attributes:
+// changes to the name must occur separately from changes to the attributes:
 if d.HasChange("name") {
 oldv, newv := d.GetChange("name")

@@ -188,7 +188,7 @@ func resourceAzureSqlDatabaseServiceUpdate(d *schema.ResourceData, meta interfac
 log.Println("[INFO] Issuing Azure Database Service parameter update.")
 reqID, err := sqlClient.UpdateDatabase(serverName, name, updateParams)
 if err != nil {
-return fmt.Errorf("Failed issuing Azure SQL Service paramater update: %s", err)
+return fmt.Errorf("Failed issuing Azure SQL Service parameter update: %s", err)
 }

 log.Println("[INFO] Waiting for Azure SQL Database Service parameter update.")
@@ -125,7 +125,7 @@ func resourceAzureStorageBlobRead(d *schema.ResourceData, meta interface{}) erro
 // resourceAzureStorageBlobUpdate does all the necessary API calls to
 // update a blob on Azure.
 func resourceAzureStorageBlobUpdate(d *schema.ResourceData, meta interface{}) error {
-// NOTE: although empty as most paramters have ForceNew set; this is
+// NOTE: although empty as most parameters have ForceNew set; this is
 // still required in case of changes to the storage_service_key

 // run the ExistsFunc beforehand to ensure the resource's existence nonetheless:
@@ -173,7 +173,7 @@ var CLOUDSTACK_NETWORK_2_OFFERING = os.Getenv("CLOUDSTACK_NETWORK_2_OFFERING")
 // An IP address in CLOUDSTACK_NETWORK_2_CIDR
 var CLOUDSTACK_NETWORK_2_IPADDRESS = os.Getenv("CLOUDSTACK_NETWORK_2_IPADDRESS")

-// A network that already exists and isnt CLOUDSTACK_NETWORK_1
+// A network that already exists and isn't CLOUDSTACK_NETWORK_1
 var CLOUDSTACK_2ND_NIC_NETWORK = os.Getenv("CLOUDSTACK_2ND_NIC_NETWORK")

 // An IP address in CLOUDSTACK_2ND_NIC_NETWORK
@@ -93,7 +93,7 @@ func resourceCloudStackDiskCreate(d *schema.ResourceData, meta interface{}) erro
 p.SetSize(int64(d.Get("size").(int)))
 }

-// If there is a project supplied, we retreive and set the project id
+// If there is a project supplied, we retrieve and set the project id
 if project, ok := d.GetOk("project"); ok {
 // Retrieve the project UUID
 projectid, e := retrieveUUID(cs, "project", project.(string))
@@ -362,7 +362,7 @@ func resourceCloudStackEgressFirewallUpdate(d *schema.ResourceData, meta interfa

 // Then loop through all the currently configured rules and create the new ones
 for _, rule := range nrs.List() {
-// When succesfully deleted, re-create it again if it still exists
+// When successfully deleted, re-create it again if it still exists
 err := resourceCloudStackEgressFirewallCreateRule(
 d, meta, rule.(map[string]interface{}))
@@ -362,7 +362,7 @@ func resourceCloudStackFirewallUpdate(d *schema.ResourceData, meta interface{})

 // Then loop through all the currently configured rules and create the new ones
 for _, rule := range nrs.List() {
-// When succesfully deleted, re-create it again if it still exists
+// When successfully deleted, re-create it again if it still exists
 err := resourceCloudStackFirewallCreateRule(
 d, meta, rule.(map[string]interface{}))

@@ -153,7 +153,7 @@ func resourceCloudStackInstanceCreate(d *schema.ResourceData, meta interface{})
 p.SetIpaddress(ipaddres.(string))
 }

-// If there is a project supplied, we retreive and set the project id
+// If there is a project supplied, we retrieve and set the project id
 if project, ok := d.GetOk("project"); ok {
 // Retrieve the project UUID
 projectid, e := retrieveUUID(cs, "project", project.(string))
@@ -74,7 +74,7 @@ func resourceCloudStackIPAddressCreate(d *schema.ResourceData, meta interface{})
 p.SetVpcid(vpcid)
 }

-// If there is a project supplied, we retreive and set the project id
+// If there is a project supplied, we retrieve and set the project id
 if project, ok := d.GetOk("project"); ok {
 // Retrieve the project UUID
 projectid, e := retrieveUUID(cs, "project", project.(string))
@@ -125,7 +125,7 @@ func resourceCloudStackNetworkCreate(d *schema.ResourceData, meta interface{}) e
 }
 }

-// If there is a project supplied, we retreive and set the project id
+// If there is a project supplied, we retrieve and set the project id
 if project, ok := d.GetOk("project"); ok {
 // Retrieve the project UUID
 projectid, e := retrieveUUID(cs, "project", project.(string))
@@ -417,7 +417,7 @@ func resourceCloudStackNetworkACLRuleUpdate(d *schema.ResourceData, meta interfa

 // Then loop through all the currently configured rules and create the new ones
 for _, rule := range nrs.List() {
-// When succesfully deleted, re-create it again if it still exists
+// When successfully deleted, re-create it again if it still exists
 err := resourceCloudStackNetworkACLRuleCreateRule(d, meta, rule.(map[string]interface{}))

 // We need to update this first to preserve the correct state
@@ -81,7 +81,7 @@ func resourceCloudStackVPCCreate(d *schema.ResourceData, meta interface{}) error
 // Create a new parameter struct
 p := cs.VPC.NewCreateVPCParams(d.Get("cidr").(string), displaytext.(string), name, vpcofferingid, zoneid)

-// If there is a project supplied, we retreive and set the project id
+// If there is a project supplied, we retrieve and set the project id
 if project, ok := d.GetOk("project"); ok {
 // Retrieve the project UUID
 projectid, e := retrieveUUID(cs, "project", project.(string))
@@ -58,7 +58,7 @@ func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) er
 domain, err := client.RetrieveDomain(d.Id())
 if err != nil {
 // If the domain is somehow already destroyed, mark as
-// succesfully gone
+// successfully gone
 if strings.Contains(err.Error(), "404 Not Found") {
 d.SetId("")
 return nil
@@ -96,7 +96,7 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er
 rec, err := client.RetrieveRecord(domain, d.Id())
 if err != nil {
 // If the record is somehow already destroyed, mark as
-// succesfully gone
+// successfully gone
 if strings.Contains(err.Error(), "404 Not Found") {
 d.SetId("")
 return nil
@@ -152,7 +152,7 @@ func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{})
 err := client.DestroyRecord(d.Get("domain").(string), d.Id())
 if err != nil {
 // If the record is somehow already destroyed, mark as
-// succesfully gone
+// successfully gone
 if strings.Contains(err.Error(), "404 Not Found") {
 return nil
 }
@@ -68,7 +68,7 @@ func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) er
 key, err := client.RetrieveSSHKey(d.Id())
 if err != nil {
 // If the key is somehow already destroyed, mark as
-// succesfully gone
+// successfully gone
 if strings.Contains(err.Error(), "404 Not Found") {
 d.SetId("")
 return nil
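The four DigitalOcean hunks above all fix the same comment inside one recurring read/delete pattern: a 404 from the API means the resource is already gone, so the ID is cleared and the call returns cleanly instead of failing. A minimal sketch of that pattern in the context of `resourceDigitalOceanDomainRead`; only the final fallthrough `return err` is an assumption, the rest follows the fragments shown.

```go
// Sketch of the recurring "already gone" read pattern; the closing return is assumed.
domain, err := client.RetrieveDomain(d.Id())
if err != nil {
	// If the domain is somehow already destroyed, mark as
	// successfully gone
	if strings.Contains(err.Error(), "404 Not Found") {
		d.SetId("")
		return nil
	}
	return err // assumption: any other error is surfaced to Terraform
}
```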
@@ -97,7 +97,7 @@ func resourceComputeProjectMetadataCreate(d *schema.ResourceData, meta interface

 log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

-// Optimistic locking requires the fingerprint recieved to match
+// Optimistic locking requires the fingerprint received to match
 // the fingerprint we send the server, if there is a mismatch then we
 // are working on old data, and must retry
 err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
@@ -197,7 +197,7 @@ func resourceComputeProjectMetadataUpdate(d *schema.ResourceData, meta interface

 log.Printf("[DEBUG] SetCommonMetadata: %d (%s)", op.Id, op.SelfLink)

-// Optimistic locking requires the fingerprint recieved to match
+// Optimistic locking requires the fingerprint received to match
 // the fingerprint we send the server, if there is a mismatch then we
 // are working on old data, and must retry
 err = resourceOperationWaitGlobal(config, op, "SetCommonMetadata")
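The comment corrected in the two hunks above describes optimistic locking on the metadata fingerprint. As a self-contained illustration of that retry pattern, here is a sketch using hypothetical `read`/`write`/`apply` callbacks in place of the real provider calls; none of these names come from the diff, and detecting the conflict by inspecting the error text is likewise an assumption. It uses only the standard library (`errors`, `strings`).

```go
// updateWithFingerprint sketches the optimistic-locking loop: read the current
// items and their fingerprint, apply our changes, and write them back together
// with the fingerprint we read. If the server reports a fingerprint mismatch,
// someone else changed the data in the meantime, so re-read and retry instead
// of clobbering their update.
func updateWithFingerprint(
	read func() (fingerprint string, items map[string]string, err error),
	write func(fingerprint string, items map[string]string) error,
	apply func(items map[string]string),
) error {
	const maxRetries = 5
	for i := 0; i < maxRetries; i++ {
		fp, items, err := read()
		if err != nil {
			return err
		}
		apply(items)
		err = write(fp, items)
		if err == nil {
			return nil
		}
		// Assumption: a conflict is recognized by the error text; a real
		// provider would classify the API error properly.
		if !strings.Contains(err.Error(), "fingerprint") {
			return err
		}
	}
	return errors.New("metadata update: too many fingerprint conflicts")
}
```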
@@ -190,7 +190,7 @@ func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error {
 return err
 }

-// This error usualy means that the policy is attached
+// This error usually means that the policy is attached
 // to a firewall. At this point, the firewall is probably
 // being delete. So, we retry a few times.

@@ -500,7 +500,7 @@ func jobToResourceData(job *rundeck.JobDetail, d *schema.ResourceData) error {
 "decription": option.Description,
 "required": option.IsRequired,
 "allow_multiple_values": option.AllowsMultipleValues,
-"multi_value_delimeter": option.MultiValueDelimiter,
+"multi_value_delimiter": option.MultiValueDelimiter,
 "obscure_input": option.ObscureInput,
 "exposed_to_scripts": option.ValueIsExposedToScripts,
 }
@@ -149,7 +149,7 @@ func (c *ApplyCommand) Run(args []string) int {
 }
 }

-// Setup the state hook for continous state updates
+// Setup the state hook for continuous state updates
 {
 state, err := c.State()
 if err != nil {
@@ -74,7 +74,7 @@ func TestApply_destroy(t *testing.T) {
 }

 // Should have a backup file
-f, err = os.Open(statePath + DefaultBackupExtention)
+f, err = os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -188,7 +188,7 @@ func TestApply_destroyTargeted(t *testing.T) {
 }

 // Should have a backup file
-f, err = os.Open(statePath + DefaultBackupExtention)
+f, err = os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -599,7 +599,7 @@ func TestApply_refresh(t *testing.T) {
 }

 // Should have a backup file
-f, err = os.Open(statePath + DefaultBackupExtention)
+f, err = os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -787,7 +787,7 @@ func TestApply_state(t *testing.T) {
 }

 // Should have a backup file
-f, err = os.Open(statePath + DefaultBackupExtention)
+f, err = os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -1161,7 +1161,7 @@ func TestApply_disableBackup(t *testing.T) {
 }

 // Ensure there is no backup
-_, err = os.Stat(statePath + DefaultBackupExtention)
+_, err = os.Stat(statePath + DefaultBackupExtension)
 if err == nil || !os.IsNotExist(err) {
 t.Fatalf("backup should not exist")
 }
@@ -19,8 +19,8 @@ const DefaultStateFilename = "terraform.tfstate"
 // DefaultVarsFilename is the default filename used for vars
 const DefaultVarsFilename = "terraform.tfvars"

-// DefaultBackupExtention is added to the state file to form the path
-const DefaultBackupExtention = ".backup"
+// DefaultBackupExtension is added to the state file to form the path
+const DefaultBackupExtension = ".backup"

 // DefaultDataDirectory is the directory where local state is stored
 // by default.
@@ -58,7 +58,7 @@ type Meta struct {
 // be overriden.
 //
 // backupPath is used to backup the state file before writing a modified
-// version. It defaults to stateOutPath + DefaultBackupExtention
+// version. It defaults to stateOutPath + DefaultBackupExtension
 statePath string
 stateOutPath string
 backupPath string
@@ -74,7 +74,7 @@ func (m *Meta) initStatePaths() {
 m.stateOutPath = m.statePath
 }
 if m.backupPath == "" {
-m.backupPath = m.stateOutPath + DefaultBackupExtention
+m.backupPath = m.stateOutPath + DefaultBackupExtension
 }
 }

@@ -190,7 +190,7 @@ func TestMeta_initStatePaths(t *testing.T) {
 if m.stateOutPath != DefaultStateFilename {
 t.Fatalf("bad: %#v", m)
 }
-if m.backupPath != DefaultStateFilename+DefaultBackupExtention {
+if m.backupPath != DefaultStateFilename+DefaultBackupExtension {
 t.Fatalf("bad: %#v", m)
 }
@@ -201,7 +201,7 @@ func TestMeta_initStatePaths(t *testing.T) {
 if m.stateOutPath != "foo" {
 t.Fatalf("bad: %#v", m)
 }
-if m.backupPath != "foo"+DefaultBackupExtention {
+if m.backupPath != "foo"+DefaultBackupExtension {
 t.Fatalf("bad: %#v", m)
 }

@@ -212,7 +212,7 @@ func TestMeta_initStatePaths(t *testing.T) {
 if m.statePath != DefaultStateFilename {
 t.Fatalf("bad: %#v", m)
 }
-if m.backupPath != "foo"+DefaultBackupExtention {
+if m.backupPath != "foo"+DefaultBackupExtension {
 t.Fatalf("bad: %#v", m)
 }
 }
@@ -88,7 +88,7 @@ func TestPlan_destroy(t *testing.T) {
 }
 }

-f, err := os.Open(statePath + DefaultBackupExtention)
+f, err := os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -561,7 +561,7 @@ func TestPlan_disableBackup(t *testing.T) {
 }

 // Ensure there is no backup
-_, err = os.Stat(statePath + DefaultBackupExtention)
+_, err = os.Stat(statePath + DefaultBackupExtension)
 if err == nil || !os.IsNotExist(err) {
 t.Fatalf("backup should not exist")
 }
@@ -261,7 +261,7 @@ Options:
 automatically loaded if this flag is not specified.

 -vcs=true If true (default), push will upload only files
-comitted to your VCS, if detected.
+committed to your VCS, if detected.

 -no-color If specified, output won't contain any color.

@@ -202,7 +202,7 @@ func TestRefresh_defaultState(t *testing.T) {
 t.Fatalf("bad: %#v", actual)
 }

-f, err = os.Open(statePath + DefaultBackupExtention)
+f, err = os.Open(statePath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -286,7 +286,7 @@ func TestRefresh_outPath(t *testing.T) {
 t.Fatalf("bad: %#v", actual)
 }

-f, err = os.Open(outPath + DefaultBackupExtention)
+f, err = os.Open(outPath + DefaultBackupExtension)
 if err != nil {
 t.Fatalf("err: %s", err)
 }
@@ -575,7 +575,7 @@ func TestRefresh_disableBackup(t *testing.T) {
 }

 // Ensure there is no backup
-_, err = os.Stat(outPath + DefaultBackupExtention)
+_, err = os.Stat(outPath + DefaultBackupExtension)
 if err == nil || !os.IsNotExist(err) {
 t.Fatalf("backup should not exist")
 }
@@ -295,7 +295,7 @@ func (c *RemoteConfigCommand) enableRemoteState() int {
 if backupPath != "-" {
 // Provide default backup path if none provided
 if backupPath == "" {
-backupPath = c.conf.statePath + DefaultBackupExtention
+backupPath = c.conf.statePath + DefaultBackupExtension
 }

 log.Printf("[INFO] Writing backup state to: %s", backupPath)
@@ -419,7 +419,7 @@ func testRemoteLocal(t *testing.T, exists bool) {
 }

 func testRemoteLocalBackup(t *testing.T, exists bool) {
-_, err := os.Stat(DefaultStateFilename + DefaultBackupExtention)
+_, err := os.Stat(DefaultStateFilename + DefaultBackupExtension)
 if os.IsNotExist(err) && !exists {
 return
 }
@@ -150,7 +150,7 @@ func State(opts *StateOpts) (*StateResult, error) {

 // If we have a result, make sure to back it up
 if result.State != nil {
-backupPath := result.StatePath + DefaultBackupExtention
+backupPath := result.StatePath + DefaultBackupExtension
 if opts.BackupPath != "" {
 backupPath = opts.BackupPath
 }
@@ -194,7 +194,7 @@ func StateFromPlan(
 // If we have a result, make sure to back it up
 result = &state.BackupState{
 Real: result,
-Path: resultPath + DefaultBackupExtention,
+Path: resultPath + DefaultBackupExtension,
 }

 return result, resultPath, nil
@@ -139,7 +139,7 @@ func (c *Communicator) Connect(o terraform.UIOutput) (err error) {
 c.client = ssh.NewClient(sshConn, sshChan, req)

 if c.config.sshAgent != nil {
-log.Printf("[DEBUG] Telling SSH config to foward to agent")
+log.Printf("[DEBUG] Telling SSH config to forward to agent")
 if err := c.config.sshAgent.ForwardToAgent(c.client); err != nil {
 return err
 }
@@ -603,7 +603,7 @@ func loadProvisionersHcl(os *hclobj.Object, connInfo map[string]interface{}) ([]
 return nil, err
 }

-// Delete the "connection" section, handle seperately
+// Delete the "connection" section, handle separately
 delete(config, "connection")

 rawConfig, err := NewRawConfig(config)
@@ -791,7 +791,7 @@ func (m schemaMap) diffSet(
 ns := n.(*Set)

 // If the new value was set, compare the listCode's to determine if
-// the two are equal. Comparing listCode's instead of the actuall values
+// the two are equal. Comparing listCode's instead of the actual values
 // is needed because there could be computed values in the set which
 // would result in false positives while comparing.
 if !all && nSet && reflect.DeepEqual(os.listCode(), ns.listCode()) {
@@ -2920,7 +2920,7 @@ func TestSchemaMap_InternalValidate(t *testing.T) {
 if tc.Err {
 t.Fatalf("%d: Expected error did not occur:\n\n%#v", i, tc.In)
 }
-t.Fatalf("%d: Unexpected error occured:\n\n%#v", i, tc.In)
+t.Fatalf("%d: Unexpected error occurred:\n\n%#v", i, tc.In)
 }
 }

@@ -41,7 +41,7 @@ func (d *Diff) AddModule(path []string) *ModuleDiff {
 }

 // ModuleByPath is used to lookup the module diff for the given path.
-// This should be the prefered lookup mechanism as it allows for future
+// This should be the preferred lookup mechanism as it allows for future
 // lookup optimizations.
 func (d *Diff) ModuleByPath(path []string) *ModuleDiff {
 if d == nil {
@@ -459,7 +459,7 @@ func (d *InstanceDiff) Same(d2 *InstanceDiff) (bool, string) {

 // This is a little tricky, but when a diff contains a computed list
 // or set that can only be interpolated after the apply command has
-// created the dependant resources, it could turn out that the result
+// created the dependent resources, it could turn out that the result
 // is actually the same as the existing state which would remove the
 // key from the diff.
 if diffOld.NewComputed && strings.HasSuffix(k, ".#") {
@@ -83,7 +83,7 @@ func (s *State) AddModule(path []string) *ModuleState {
 }

 // ModuleByPath is used to lookup the module state for the given path.
-// This should be the prefered lookup mechanism as it allows for future
+// This should be the preferred lookup mechanism as it allows for future
 // lookup optimizations.
 func (s *State) ModuleByPath(path []string) *ModuleState {
 if s == nil {
@@ -95,7 +95,7 @@ func (t *FlattenTransformer) Transform(g *Graph) error {
 g.ConnectDependent(sv)
 }

-// Re-connect all the things that dependend on the graph
+// Re-connect all the things that dependent on the graph
 // we just flattened. This should connect them back into the
 // correct nodes if their DependentOn() is setup correctly.
 for _, v := range dependents {
@@ -67,7 +67,7 @@ The command-line flags are all optional. The list of available flags are:
 * `-var-file=foo` - Set the value of variables using a variable file.

 * `-vcs=true` - If true (default), then Terraform will detect if a VCS
-is in use, such as Git, and will only upload files that are comitted to
+is in use, such as Git, and will only upload files that are committed to
 version control. If no version control system is detected, Terraform will
 upload all files in `path` (parameter to the command).
@@ -77,7 +77,7 @@ The files that are uploaded and packaged with a `push` are all the
 files in the `path` given as the parameter to the command, recursively.
 By default (unless `-vcs=false` is specified), Terraform will automatically
 detect when a VCS such as Git is being used, and in that case will only
-upload the files that are comitted. Because of this built-in intelligence,
+upload the files that are committed. Because of this built-in intelligence,
 you don't have to worry about excluding folders such as ".git" or ".hg" usually.

 If Terraform doesn't detect a VCS, it will upload all files.
@@ -26,7 +26,7 @@ resource "aws_cloudwatch_metric_alarm" "foobar" {
 }
 ```

-## Example in Conjuction with Scaling Policies
+## Example in Conjunction with Scaling Policies
 ```
 resource "aws_autoscaling_policy" "bat" {
 name = "foobar3-terraform-test"
@@ -115,7 +115,7 @@ The following arguments are supported:

 * `domain_name` - (Optional) The name of an Active Directory domain to join.

-* `domain_ou` - (Optional) Specifies the LDAP Organisational Unit to place the
+* `domain_ou` - (Optional) Specifies the LDAP Organizational Unit to place the
 instance in.

 * `domain_username` - (Optional) The username of an account with permission to
@@ -71,7 +71,7 @@ resource "google_compute_autoscaler" "foobar" {
 }
 ```

-## Argument Refernce
+## Argument Reference

 The following arguments are supported:

@@ -27,7 +27,7 @@ resource "google_compute_instance_group_manager" "foobar" {
 }
 ```

-## Argument Refernce
+## Argument Reference

 The following arguments are supported:

@@ -113,7 +113,7 @@ The following arguments are supported:
 from the set of predefined values. Defaults to `false`, meaning that the user may choose only
 one value.

-* `multi_value_delimeter`: (Optional) Delimeter used to join together multiple values into a single
+* `multi_value_delimiter`: (Optional) Delimiter used to join together multiple values into a single
 string when `allow_multiple_values` is set and the user chooses multiple values.

 * `obscure_input`: (Optional) Boolean controlling whether the value of this option should be obscured
@@ -73,7 +73,7 @@ The following arguments are supported:

 * `os_type (string)` - (Optional) The OS type of the node. Valid options are: `linux` and
 `windows`. If not supplied the connection type will be used to determine the OS type (`ssh`
-will asume `linux` and `winrm` will assume `windows`).
+will assume `linux` and `winrm` will assume `windows`).

 * `prevent_sudo (boolean)` - (Optional) Prevent the use of sudo while installing, configuring
 and running the initial Chef Client run. This option is only used with `ssh` type