provider/aws: Fix misspelled words

Radek Simko 2016-09-12 07:14:24 +01:00
parent 068585bdba
commit 2ad84a51df
No known key found for this signature in database
GPG Key ID: 6823F3DCCE01BB19
17 changed files with 27 additions and 27 deletions


@@ -540,7 +540,7 @@ func TestAWSGetCredentials_shouldBeENV(t *testing.T) {
 }
 }
-// unsetEnv unsets enviornment variables for testing a "clean slate" with no
+// unsetEnv unsets environment variables for testing a "clean slate" with no
 // credentials in the environment
 func unsetEnv(t *testing.T) func() {
 // Grab any existing AWS keys and preserve. In some tests we'll unset these, so


@@ -404,10 +404,10 @@ func init() {
 "assume_role_role_arn": "The ARN of an IAM role to assume prior to making API calls.",
-"assume_role_session_name": "The session name to use when assuming the role. If ommitted," +
+"assume_role_session_name": "The session name to use when assuming the role. If omitted," +
 " no session name is passed to the AssumeRole call.",
-"assume_role_external_id": "The external ID to use when assuming the role. If ommitted," +
+"assume_role_external_id": "The external ID to use when assuming the role. If omitted," +
 " no external ID is passed to the AssumeRole call.",
 }
 }


@@ -503,7 +503,7 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
 if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
 opts.TerminationPolicies = expandStringList(v.([]interface{}))
 } else {
-log.Printf("[DEBUG] Explictly setting null termination policy to 'Default'")
+log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
 opts.TerminationPolicies = aws.StringSlice([]string{"Default"})
 }
 }


@@ -544,7 +544,7 @@ func resourceAwsCloudFrontDistributionDelete(d *schema.ResourceData, meta interf
 // skip delete if retain_on_delete is enabled
 if d.Get("retain_on_delete").(bool) {
-log.Printf("[WARN] Removing Distribtuion ID %s with retain_on_delete set. Please delete this distribution manually.", d.Id())
+log.Printf("[WARN] Removing Distributions ID %s with retain_on_delete set. Please delete this distribution manually.", d.Id())
 d.SetId("")
 return nil
 }


@@ -87,10 +87,10 @@ func TestAccAWSEFSMountTarget_disappears(t *testing.T) {
 }
 func TestResourceAWSEFSMountTarget_mountTargetDnsName(t *testing.T) {
-actual := resourceAwsEfsMountTargetDnsName("non-existant-1c",
-"fs-123456ab", "non-existant-1")
-expected := "non-existant-1c.fs-123456ab.efs.non-existant-1.amazonaws.com"
+actual := resourceAwsEfsMountTargetDnsName("non-existent-1c",
+"fs-123456ab", "non-existent-1")
+expected := "non-existent-1c.fs-123456ab.efs.non-existent-1.amazonaws.com"
 if actual != expected {
 t.Fatalf("Expected EFS mount target DNS name to be %s, got %s",
 expected, actual)


@@ -63,7 +63,7 @@ func TestAccAWSIAMServerCertificate_disappears(t *testing.T) {
 })
 if err != nil {
-return fmt.Errorf("Error destorying cert in test: %s", err)
+return fmt.Errorf("Error destroying cert in test: %s", err)
 }
 return nil


@@ -30,7 +30,7 @@ func resourceAwsIamUser() *schema.Resource {
 The UniqueID could be used as the Id(), but none of the API
 calls allow specifying a user by the UniqueID: they require the
 name. The only way to locate a user by UniqueID is to list them
-all and that would make this provider unnecessarilly complex
+all and that would make this provider unnecessarily complex
 and inefficient. Still, there are other reasons one might want
 the UniqueID, so we can make it available.
 */


@@ -144,7 +144,7 @@ func resourceAwsLambdaPermissionCreate(d *schema.ResourceData, meta interface{})
 *input.FunctionName, err))
 }
-log.Printf("[ERROR] An actual error occured when expecting Lambda policy to be there: %s", err)
+log.Printf("[ERROR] An actual error occurred when expecting Lambda policy to be there: %s", err)
 return resource.NonRetryableError(err)
 }
 return nil


@@ -195,7 +195,7 @@ func resourceAwsRDSClusterInstanceRead(d *schema.ResourceData, meta interface{})
 return nil
 }
-// Retreive DB Cluster information, to determine if this Instance is a writer
+// Retrieve DB Cluster information, to determine if this Instance is a writer
 conn := meta.(*AWSClient).rdsconn
 resp, err := conn.DescribeDBClusters(&rds.DescribeDBClustersInput{
 DBClusterIdentifier: db.DBClusterIdentifier,


@@ -347,7 +347,7 @@ func expandRedshiftSGAuthorizeIngress(configured []interface{}) ([]redshift.Auth
 var ingress []redshift.AuthorizeClusterSecurityGroupIngressInput
 // Loop over our configured parameters and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, pRaw := range configured {
 data := pRaw.(map[string]interface{})
@@ -375,7 +375,7 @@ func expandRedshiftSGRevokeIngress(configured []interface{}) ([]redshift.RevokeC
 var ingress []redshift.RevokeClusterSecurityGroupIngressInput
 // Loop over our configured parameters and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, pRaw := range configured {
 data := pRaw.(map[string]interface{})


@@ -589,7 +589,7 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
 if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() != "UnsupportedArgument" {
 return err
 }
-log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Accelaration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region)
+log.Printf("[WARN] S3 bucket: %s, the S3 Transfer Acceleration is not supported in the region: %s", d.Id(), meta.(*AWSClient).region)
 } else {
 log.Printf("[DEBUG] S3 bucket: %s, read Acceleration: %v", d.Id(), accelerate)
 d.Set("acceleration_status", accelerate.Status)


@@ -761,7 +761,7 @@ func matchRules(rType string, local []interface{}, remote []map[string]interface
 if rcRaw != nil {
 remoteCidrs = rcRaw.([]string)
 }
-// convert remote cidrs to a set, for easy comparisions
+// convert remote cidrs to a set, for easy comparisons
 var list []interface{}
 for _, s := range remoteCidrs {
 list = append(list, s)


@@ -168,7 +168,7 @@ information and instructions for recovery. Error message: %s`, sg_id, awsErr.Mes
 sg, err := findResourceSecurityGroup(conn, sg_id)
 if err != nil {
-log.Printf("[DEBUG] Error finding Secuirty Group (%s) for Rule (%s): %s", sg_id, id, err)
+log.Printf("[DEBUG] Error finding Security Group (%s) for Rule (%s): %s", sg_id, id, err)
 return resource.NonRetryableError(err)
 }


@@ -313,7 +313,7 @@ func TestRulesMixedMatching(t *testing.T) {
 },
 },
 },
-// a local rule with 2 cidrs, remote has 4 cidrs, shoudl be saved to match
+// a local rule with 2 cidrs, remote has 4 cidrs, should be saved to match
 // the local but also an extra rule found
 {
 local: []interface{}{


@@ -35,7 +35,7 @@ func expandListeners(configured []interface{}) ([]*elb.Listener, error) {
 listeners := make([]*elb.Listener, 0, len(configured))
 // Loop over our configured listeners and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, lRaw := range configured {
 data := lRaw.(map[string]interface{})
@@ -234,7 +234,7 @@ func expandParameters(configured []interface{}) ([]*rds.Parameter, error) {
 var parameters []*rds.Parameter
 // Loop over our configured parameters and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, pRaw := range configured {
 data := pRaw.(map[string]interface{})
@@ -258,7 +258,7 @@ func expandRedshiftParameters(configured []interface{}) ([]*redshift.Parameter,
 var parameters []*redshift.Parameter
 // Loop over our configured parameters and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, pRaw := range configured {
 data := pRaw.(map[string]interface{})
@@ -341,7 +341,7 @@ func expandElastiCacheParameters(configured []interface{}) ([]*elasticache.Param
 parameters := make([]*elasticache.ParameterNameValue, 0, len(configured))
 // Loop over our configured parameters and create
-// an array of aws-sdk-go compatabile objects
+// an array of aws-sdk-go compatible objects
 for _, pRaw := range configured {
 data := pRaw.(map[string]interface{})
@@ -1356,7 +1356,7 @@ func flattenBeanstalkTrigger(list []*elasticbeanstalk.Trigger) []string {
 }
 // There are several parts of the AWS API that will sort lists of strings,
-// causing diffs inbetweeen resources that use lists. This avoids a bit of
+// causing diffs inbetween resources that use lists. This avoids a bit of
 // code duplication for pre-sorts that can be used for things like hash
 // functions, etc.
 func sortInterfaceSlice(in []interface{}) []interface{} {


@@ -899,7 +899,7 @@ func TestFlattenSecurityGroups(t *testing.T) {
 },
 // include the owner id, but from a different account. This is reflects
-// EC2 Classic when refering to groups by name
+// EC2 Classic when referring to groups by name
 {
 ownerId: aws.String("user1234"),
 pairs: []*ec2.UserIdGroupPair{
@@ -918,7 +918,7 @@ func TestFlattenSecurityGroups(t *testing.T) {
 },
 // include the owner id, but from a different account. This reflects in
-// EC2 VPC when refering to groups by id
+// EC2 VPC when referring to groups by id
 {
 ownerId: aws.String("user1234"),
 pairs: []*ec2.UserIdGroupPair{


@@ -88,7 +88,7 @@ func TestValidateLambdaFunctionName(t *testing.T) {
 invalidNames := []string{
 "/FunctionNameWithSlash",
 "function.name.with.dots",
-// lenght > 140
+// length > 140
 "arn:aws:lambda:us-west-2:123456789012:function:TooLoooooo" +
 "ooooooooooooooooooooooooooooooooooooooooooooooooooooooo" +
 "ooooooooooooooooongFunctionName",
@@ -119,7 +119,7 @@ func TestValidateLambdaQualifier(t *testing.T) {
 invalidNames := []string{
 // No ARNs allowed
 "arn:aws:lambda:us-west-2:123456789012:function:prod",
-// lenght > 128
+// length > 128
 "TooLooooooooooooooooooooooooooooooooooooooooooooooooooo" +
 "ooooooooooooooooooooooooooooooooooooooooooooooooooo" +
 "oooooooooooongQualifier",