Merge branch 'master' of github.com:hashicorp/terraform into 2087-consul-service-resource

commit da70103a05
Max Englander, 2015-10-29 05:04:40 -04:00
131 changed files with 6190 additions and 2411 deletions

.gitignore (vendored)

@@ -1,5 +1,6 @@
*.dll
*.exe
+.DS_Store
example.tf
terraform.tfplan
terraform.tfstate

CHANGELOG.md

@@ -1,4 +1,63 @@
-## 0.6.4 (unreleased)
+## 0.6.7 (Unreleased)
+
+FEATURES:
+
+* **New resource: `aws_cloudformation_stack`** [GH-2636]
+* **New resource: `aws_cloudtrail`** [GH-3094]
+
+IMPROVEMENTS:
+
+* provider/google: Accurate Terraform Version [GH-3554]
+* provider/google: Simplified auth (DefaultClient support) [GH-3553]
+* provider/google: automatic_restart, preemptible, on_host_maintenance options [GH-3643]
+* null_resource: enhance and document [GH-3244, GH-3659]
+* provider/aws: Add CORS settings to S3 bucket [GH-3387]
+* provider/aws: Add notification topic ARN for ElastiCache clusters [GH-3674]
+
+BUG FIXES:
+
+* `terraform remote config`: update `--help` output [GH-3632]
+* core: modules on Git branches now update properly [GH-1568]
+* provider/google: Timeout when deleting large instance_group_manager [GH-3591]
+* provider/aws: Fix issue with order of Termination Policies in AutoScaling Groups.
+  This will introduce plans on upgrade to this version, in order to correct the ordering [GH-2890]
+* provider/aws: Allow cluster name, not only ARN for `aws_ecs_service` [GH-3668]
+
+## 0.6.6 (October 23, 2015)
+
+FEATURES:
+
+* New interpolation functions: `cidrhost`, `cidrnetmask` and `cidrsubnet` [GH-3127]
+
+IMPROVEMENTS:
+
+* "forces new resource" now highlighted in plan output [GH-3136]
+
+BUG FIXES:
+
+* helper/schema: Better error message for assigning list/map to string [GH-3009]
+* remote/state/atlas: Additional remote state conflict handling for semantically neutral state changes [GH-3603]
+
+## 0.6.5 (October 21, 2015)
+
+FEATURES:
+
+* **New resources: `aws_codedeploy_app` and `aws_codedeploy_deployment_group`** [GH-2783]
+* New remote state backend: `etcd` [GH-3487]
+* New interpolation functions: `upper` and `lower` [GH-3558]
+
+BUG FIXES:
+
+* core: Fix remote state conflicts caused by ambiguity in ordering of deeply nested modules [GH-3573]
+* core: Fix remote state conflicts caused by state metadata differences [GH-3569]
+* core: Avoid using http.DefaultClient [GH-3532]
+
+INTERNAL IMPROVEMENTS:
+
+* provider/digitalocean: use official Go client [GH-3333]
+* core: extract module fetching to external library [GH-3516]
+
+## 0.6.4 (October 15, 2015)
FEATURES:

@@ -78,6 +137,7 @@ BUG FIXES:
* provider/aws: Migrate KeyPair to version 1, fixing issue with using `file()` [GH-3470]
* provider/aws: Fix force_delete on autoscaling groups [GH-3485]
* provider/aws: Fix crash with VPC Peering connections [GH-3490]
+* provider/aws: fix bug with reading GSIs from dynamodb [GH-3300]
* provider/docker: Fix issue preventing private images from being referenced [GH-2619]
* provider/digitalocean: Fix issue causing unnecessary diffs based on droplet slugsize case [GH-3284]
* provider/openstack: add state 'downloading' to list of expected states in

Vagrantfile (vendored)

@@ -13,6 +13,7 @@ ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`
# Install Prereq Packages
sudo apt-get update
+sudo apt-get upgrade -y
sudo apt-get install -y build-essential curl git-core libpcre3-dev mercurial pkg-config zip

# Install Go

@@ -41,7 +42,7 @@ source /etc/profile.d/gopath.sh
SCRIPT

Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
-config.vm.box = "chef/ubuntu-12.04"
+config.vm.box = "bento/ubuntu-12.04"
config.vm.hostname = "terraform"
config.vm.provision "shell", inline: $script, privileged: false

@@ -53,4 +54,9 @@ Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
v.vmx["numvcpus"] = "2"
end
end
+
+config.vm.provider "virtualbox" do |v|
+v.memory = 4096
+v.cpus = 2
+end
end

builtin/providers/aws/config.go

@@ -5,14 +5,18 @@ import (
"log"
"strings"

+"github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/go-multierror"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/service/autoscaling"
+"github.com/aws/aws-sdk-go/service/cloudformation"
+"github.com/aws/aws-sdk-go/service/cloudtrail"
"github.com/aws/aws-sdk-go/service/cloudwatch"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
+"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/aws/aws-sdk-go/service/directoryservice"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/ec2"

@@ -47,6 +51,8 @@ type Config struct {
}

type AWSClient struct {
+cfconn *cloudformation.CloudFormation
+cloudtrailconn *cloudtrail.CloudTrail
cloudwatchconn *cloudwatch.CloudWatch
cloudwatchlogsconn *cloudwatchlogs.CloudWatchLogs
dsconn *directoryservice.DirectoryService

@@ -69,6 +75,7 @@ type AWSClient struct {
lambdaconn *lambda.Lambda
opsworksconn *opsworks.OpsWorks
glacierconn *glacier.Glacier
+codedeployconn *codedeploy.CodeDeploy
}

// Client configures and returns a fully initialized AWSClient

@@ -98,6 +105,7 @@ func (c *Config) Client() (interface{}, error) {
Credentials: creds,
Region: aws.String(c.Region),
MaxRetries: aws.Int(c.MaxRetries),
+HTTPClient: cleanhttp.DefaultClient(),
}

log.Println("[INFO] Initializing IAM Connection")

@@ -123,6 +131,7 @@ func (c *Config) Client() (interface{}, error) {
Credentials: creds,
Region: aws.String("us-east-1"),
MaxRetries: aws.Int(c.MaxRetries),
+HTTPClient: cleanhttp.DefaultClient(),
}

log.Println("[INFO] Initializing DynamoDB connection")

@@ -175,9 +184,15 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing Lambda Connection")
client.lambdaconn = lambda.New(awsConfig)

+log.Println("[INFO] Initializing Cloudformation Connection")
+client.cfconn = cloudformation.New(awsConfig)

log.Println("[INFO] Initializing CloudWatch SDK connection")
client.cloudwatchconn = cloudwatch.New(awsConfig)

+log.Println("[INFO] Initializing CloudTrail connection")
+client.cloudtrailconn = cloudtrail.New(awsConfig)

log.Println("[INFO] Initializing CloudWatch Logs connection")
client.cloudwatchlogsconn = cloudwatchlogs.New(awsConfig)

@@ -189,6 +204,9 @@ func (c *Config) Client() (interface{}, error) {
log.Println("[INFO] Initializing Glacier connection")
client.glacierconn = glacier.New(awsConfig)

+log.Println("[INFO] Initializing CodeDeploy Connection")
+client.codedeployconn = codedeploy.New(awsConfig)
}

if len(errs) > 0 {
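The new `HTTPClient: cleanhttp.DefaultClient()` field ties into the 0.6.5 changelog entry "core: Avoid using http.DefaultClient [GH-3532]": `http.DefaultClient` is a process-wide global whose transport any package can mutate. A minimal sketch of the idea behind a "clean" client; the helper name and field values are illustrative stand-ins, not go-cleanhttp's exact defaults:

```go
package main

import (
	"fmt"
	"net/http"
	"time"
)

// newCleanClient returns a fresh *http.Client backed by its own Transport,
// so nothing else in the process can mutate it the way any package can
// mutate http.DefaultClient. Hypothetical stand-in for cleanhttp.DefaultClient.
func newCleanClient() *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Proxy:               http.ProxyFromEnvironment,
			TLSHandshakeTimeout: 10 * time.Second, // illustrative value
		},
	}
}

func main() {
	c := newCleanClient()
	fmt.Printf("dedicated transport: %T\n", c.Transport)
}
```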

builtin/providers/aws/provider.go

@@ -163,9 +163,13 @@ func Provider() terraform.ResourceProvider {
"aws_autoscaling_group": resourceAwsAutoscalingGroup(),
"aws_autoscaling_notification": resourceAwsAutoscalingNotification(),
"aws_autoscaling_policy": resourceAwsAutoscalingPolicy(),
+"aws_cloudformation_stack": resourceAwsCloudFormationStack(),
+"aws_cloudtrail": resourceAwsCloudTrail(),
"aws_cloudwatch_log_group": resourceAwsCloudWatchLogGroup(),
"aws_autoscaling_lifecycle_hook": resourceAwsAutoscalingLifecycleHook(),
"aws_cloudwatch_metric_alarm": resourceAwsCloudWatchMetricAlarm(),
+"aws_codedeploy_app": resourceAwsCodeDeployApp(),
+"aws_codedeploy_deployment_group": resourceAwsCodeDeployDeploymentGroup(),
"aws_customer_gateway": resourceAwsCustomerGateway(),
"aws_db_instance": resourceAwsDbInstance(),
"aws_db_parameter_group": resourceAwsDbParameterGroup(),

builtin/providers/aws/resource_aws_autoscaling_group.go

@@ -111,12 +111,9 @@ func resourceAwsAutoscalingGroup() *schema.Resource {
},

"termination_policies": &schema.Schema{
-Type: schema.TypeSet,
+Type: schema.TypeList,
Optional: true,
-Computed: true,
-ForceNew: true,
Elem: &schema.Schema{Type: schema.TypeString},
-Set: schema.HashString,
},

"wait_for_capacity_timeout": &schema.Schema{

@@ -187,9 +184,8 @@ func resourceAwsAutoscalingGroupCreate(d *schema.ResourceData, meta interface{})
autoScalingGroupOpts.VPCZoneIdentifier = expandVpcZoneIdentifiers(v.(*schema.Set).List())
}

-if v, ok := d.GetOk("termination_policies"); ok && v.(*schema.Set).Len() > 0 {
-autoScalingGroupOpts.TerminationPolicies = expandStringList(
-v.(*schema.Set).List())
+if v, ok := d.GetOk("termination_policies"); ok && len(v.([]interface{})) > 0 {
+autoScalingGroupOpts.TerminationPolicies = expandStringList(v.([]interface{}))
}

log.Printf("[DEBUG] AutoScaling Group create configuration: %#v", autoScalingGroupOpts)

@@ -280,6 +276,24 @@ func resourceAwsAutoscalingGroupUpdate(d *schema.ResourceData, meta interface{})
}
}
+if d.HasChange("termination_policies") {
+// If the termination policy is set to null, we need to explicitly set
+// it back to "Default", or the API won't reset it for us.
+// This means GetOk() will fail us on the zero check.
+v := d.Get("termination_policies")
+if len(v.([]interface{})) > 0 {
+opts.TerminationPolicies = expandStringList(v.([]interface{}))
+} else {
+// TerminationPolicies is a slice of string pointers, so build a one-element slice.
+log.Printf("[DEBUG] Explicitly setting null termination policy to 'Default'")
+pol := "Default"
+opts.TerminationPolicies = []*string{&pol}
+}
+}
if err := setAutoscalingTags(conn, d); err != nil {
return err
} else {
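`expandStringList` (used in the create and update paths above) and its inverse `flattenStringList` (used by the CloudFormation resource later in this commit) live in the provider's shared helpers and aren't part of this diff. A minimal sketch of their assumed behavior, converting between the raw `[]interface{}` the schema hands back and the `[]*string` the AWS SDK expects:

```go
package main

import "fmt"

// expandStringList converts schema values to the AWS SDK's []*string.
// Sketch of the shared helper; behavior assumed from its call sites.
func expandStringList(configured []interface{}) []*string {
	vs := make([]*string, 0, len(configured))
	for _, v := range configured {
		s := v.(string) // fresh variable per iteration, safe to take its address
		vs = append(vs, &s)
	}
	return vs
}

// flattenStringList is the inverse, back to plain values for state.
func flattenStringList(list []*string) []interface{} {
	vs := make([]interface{}, 0, len(list))
	for _, v := range list {
		vs = append(vs, *v)
	}
	return vs
}

func main() {
	ptrs := expandStringList([]interface{}{"OldestInstance", "ClosestToNextInstanceHour"})
	fmt.Println(*ptrs[0], flattenStringList(ptrs))
}
```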

builtin/providers/aws/resource_aws_autoscaling_group_test.go

@@ -45,7 +45,9 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "force_delete", "true"),
resource.TestCheckResourceAttr(
-"aws_autoscaling_group.bar", "termination_policies.912102603", "OldestInstance"),
+"aws_autoscaling_group.bar", "termination_policies.0", "OldestInstance"),
+resource.TestCheckResourceAttr(
+"aws_autoscaling_group.bar", "termination_policies.1", "ClosestToNextInstanceHour"),
),
},

@@ -56,6 +58,8 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) {
testAccCheckAWSLaunchConfigurationExists("aws_launch_configuration.new", &lc),
resource.TestCheckResourceAttr(
"aws_autoscaling_group.bar", "desired_capacity", "5"),
+resource.TestCheckResourceAttr(
+"aws_autoscaling_group.bar", "termination_policies.0", "ClosestToNextInstanceHour"),
testLaunchConfigurationName("aws_autoscaling_group.bar", &lc),
testAccCheckAutoscalingTags(&group.Tags, "Bar", map[string]interface{}{
"value": "bar-foo",

@@ -359,7 +363,7 @@ resource "aws_autoscaling_group" "bar" {
health_check_type = "ELB"
desired_capacity = 4
force_delete = true
-termination_policies = ["OldestInstance"]
+termination_policies = ["OldestInstance","ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.foobar.name}"

@@ -391,6 +395,7 @@ resource "aws_autoscaling_group" "bar" {
health_check_type = "ELB"
desired_capacity = 5
force_delete = true
+termination_policies = ["ClosestToNextInstanceHour"]
launch_configuration = "${aws_launch_configuration.new.name}"

builtin/providers/aws/resource_aws_cloudformation_stack.go

@@ -0,0 +1,451 @@
package aws
import (
"fmt"
"log"
"regexp"
"time"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/cloudformation"
)
func resourceAwsCloudFormationStack() *schema.Resource {
return &schema.Resource{
Create: resourceAwsCloudFormationStackCreate,
Read: resourceAwsCloudFormationStackRead,
Update: resourceAwsCloudFormationStackUpdate,
Delete: resourceAwsCloudFormationStackDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"template_body": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
StateFunc: normalizeJson,
},
"template_url": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"capabilities": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"disable_rollback": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
ForceNew: true,
},
"notification_arns": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"on_failure": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ForceNew: true,
},
"parameters": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
Computed: true,
},
"outputs": &schema.Schema{
Type: schema.TypeMap,
Computed: true,
},
"policy_body": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
StateFunc: normalizeJson,
},
"policy_url": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"timeout_in_minutes": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
ForceNew: true,
},
"tags": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
},
}
}
func resourceAwsCloudFormationStackCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
input := cloudformation.CreateStackInput{
StackName: aws.String(d.Get("name").(string)),
}
if v, ok := d.GetOk("template_body"); ok {
input.TemplateBody = aws.String(normalizeJson(v.(string)))
}
if v, ok := d.GetOk("template_url"); ok {
input.TemplateURL = aws.String(v.(string))
}
if v, ok := d.GetOk("capabilities"); ok {
input.Capabilities = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("disable_rollback"); ok {
input.DisableRollback = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("notification_arns"); ok {
input.NotificationARNs = expandStringList(v.(*schema.Set).List())
}
if v, ok := d.GetOk("on_failure"); ok {
input.OnFailure = aws.String(v.(string))
}
if v, ok := d.GetOk("parameters"); ok {
input.Parameters = expandCloudFormationParameters(v.(map[string]interface{}))
}
if v, ok := d.GetOk("policy_body"); ok {
input.StackPolicyBody = aws.String(normalizeJson(v.(string)))
}
if v, ok := d.GetOk("policy_url"); ok {
input.StackPolicyURL = aws.String(v.(string))
}
if v, ok := d.GetOk("tags"); ok {
input.Tags = expandCloudFormationTags(v.(map[string]interface{}))
}
if v, ok := d.GetOk("timeout_in_minutes"); ok {
input.TimeoutInMinutes = aws.Int64(int64(v.(int)))
}
log.Printf("[DEBUG] Creating CloudFormation Stack: %s", input)
resp, err := conn.CreateStack(&input)
if err != nil {
return fmt.Errorf("Creating CloudFormation stack failed: %s", err.Error())
}
d.SetId(*resp.StackId)
wait := resource.StateChangeConf{
Pending: []string{"CREATE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS", "ROLLBACK_COMPLETE"},
Target: "CREATE_COMPLETE",
Timeout: 30 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Get("name").(string)),
})
if err != nil {
return resp, "", err
}
status := *resp.Stacks[0].StackStatus
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
if status == "ROLLBACK_COMPLETE" {
stack := resp.Stacks[0]
failures, err := getCloudFormationFailures(stack.StackName, *stack.CreationTime, conn)
if err != nil {
return resp, "", fmt.Errorf(
"Failed getting details about rollback: %q", err.Error())
}
return resp, "", fmt.Errorf("ROLLBACK_COMPLETE:\n%q", failures)
}
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
log.Printf("[INFO] CloudFormation Stack %q created", d.Get("name").(string))
return resourceAwsCloudFormationStackRead(d, meta)
}
func resourceAwsCloudFormationStackRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
stackName := d.Get("name").(string)
input := &cloudformation.DescribeStacksInput{
StackName: aws.String(stackName),
}
resp, err := conn.DescribeStacks(input)
if err != nil {
return err
}
stacks := resp.Stacks
if len(stacks) < 1 {
return nil
}
tInput := cloudformation.GetTemplateInput{
StackName: aws.String(stackName),
}
out, err := conn.GetTemplate(&tInput)
if err != nil {
return err
}
d.Set("template_body", normalizeJson(*out.TemplateBody))
stack := stacks[0]
log.Printf("[DEBUG] Received CloudFormation stack: %s", stack)
d.Set("name", stack.StackName)
d.Set("arn", stack.StackId)
if stack.TimeoutInMinutes != nil {
d.Set("timeout_in_minutes", int(*stack.TimeoutInMinutes))
}
if stack.Description != nil {
d.Set("description", stack.Description)
}
if stack.DisableRollback != nil {
d.Set("disable_rollback", stack.DisableRollback)
}
if len(stack.NotificationARNs) > 0 {
err = d.Set("notification_arns", schema.NewSet(schema.HashString, flattenStringList(stack.NotificationARNs)))
if err != nil {
return err
}
}
originalParams := d.Get("parameters").(map[string]interface{})
err = d.Set("parameters", flattenCloudFormationParameters(stack.Parameters, originalParams))
if err != nil {
return err
}
err = d.Set("tags", flattenCloudFormationTags(stack.Tags))
if err != nil {
return err
}
err = d.Set("outputs", flattenCloudFormationOutputs(stack.Outputs))
if err != nil {
return err
}
if len(stack.Capabilities) > 0 {
err = d.Set("capabilities", schema.NewSet(schema.HashString, flattenStringList(stack.Capabilities)))
if err != nil {
return err
}
}
return nil
}
func resourceAwsCloudFormationStackUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
input := &cloudformation.UpdateStackInput{
StackName: aws.String(d.Get("name").(string)),
}
if d.HasChange("template_body") {
input.TemplateBody = aws.String(normalizeJson(d.Get("template_body").(string)))
}
if d.HasChange("template_url") {
input.TemplateURL = aws.String(d.Get("template_url").(string))
}
if d.HasChange("capabilities") {
input.Capabilities = expandStringList(d.Get("capabilities").(*schema.Set).List())
}
if d.HasChange("notification_arns") {
input.NotificationARNs = expandStringList(d.Get("notification_arns").(*schema.Set).List())
}
if d.HasChange("parameters") {
input.Parameters = expandCloudFormationParameters(d.Get("parameters").(map[string]interface{}))
}
if d.HasChange("policy_body") {
input.StackPolicyBody = aws.String(normalizeJson(d.Get("policy_body").(string)))
}
if d.HasChange("policy_url") {
input.StackPolicyURL = aws.String(d.Get("policy_url").(string))
}
log.Printf("[DEBUG] Updating CloudFormation stack: %s", input)
stack, err := conn.UpdateStack(input)
if err != nil {
return err
}
lastUpdatedTime, err := getLastCfEventTimestamp(d.Get("name").(string), conn)
if err != nil {
return err
}
wait := resource.StateChangeConf{
Pending: []string{
"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_IN_PROGRESS",
"UPDATE_ROLLBACK_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS",
"UPDATE_ROLLBACK_COMPLETE",
},
Target: "UPDATE_COMPLETE",
Timeout: 15 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Get("name").(string)),
})
if err != nil {
return resp, "", err
}
stack := resp.Stacks[0]
status := *stack.StackStatus
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
if status == "UPDATE_ROLLBACK_COMPLETE" {
failures, err := getCloudFormationFailures(stack.StackName, *lastUpdatedTime, conn)
if err != nil {
return resp, "", fmt.Errorf(
"Failed getting details about rollback: %q", err.Error())
}
return resp, "", fmt.Errorf(
"UPDATE_ROLLBACK_COMPLETE:\n%q", failures)
}
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
log.Printf("[DEBUG] CloudFormation stack %q has been updated", *stack.StackId)
return resourceAwsCloudFormationStackRead(d, meta)
}
func resourceAwsCloudFormationStackDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cfconn
input := &cloudformation.DeleteStackInput{
StackName: aws.String(d.Get("name").(string)),
}
log.Printf("[DEBUG] Deleting CloudFormation stack %s", input)
_, err := conn.DeleteStack(input)
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok {
return err
}
if awsErr.Code() == "ValidationError" {
// Ignore stack which has been already deleted
return nil
}
return err
}
wait := resource.StateChangeConf{
Pending: []string{"DELETE_IN_PROGRESS", "ROLLBACK_IN_PROGRESS"},
Target: "DELETE_COMPLETE",
Timeout: 30 * time.Minute,
MinTimeout: 5 * time.Second,
Refresh: func() (interface{}, string, error) {
resp, err := conn.DescribeStacks(&cloudformation.DescribeStacksInput{
StackName: aws.String(d.Get("name").(string)),
})
if err != nil {
awsErr, ok := err.(awserr.Error)
if !ok {
return resp, "DELETE_FAILED", err
}
log.Printf("[DEBUG] Error when deleting CloudFormation stack: %s: %s",
awsErr.Code(), awsErr.Message())
if awsErr.Code() == "ValidationError" {
return resp, "DELETE_COMPLETE", nil
}
}
if len(resp.Stacks) == 0 {
log.Printf("[DEBUG] CloudFormation stack %q is already gone", d.Get("name"))
return resp, "DELETE_COMPLETE", nil
}
status := *resp.Stacks[0].StackStatus
log.Printf("[DEBUG] Current CloudFormation stack status: %q", status)
return resp, status, err
},
}
_, err = wait.WaitForState()
if err != nil {
return err
}
log.Printf("[DEBUG] CloudFormation stack %q has been deleted", d.Id())
d.SetId("")
return nil
}
// getLastCfEventTimestamp takes the first event in a list
// of events ordered from the newest to the oldest
// and extracts its timestamp.
// (Stack.LastUpdatedTime only reflects the last *successful* update.)
func getLastCfEventTimestamp(stackName string, conn *cloudformation.CloudFormation) (
*time.Time, error) {
output, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
StackName: aws.String(stackName),
})
if err != nil {
return nil, err
}
return output.StackEvents[0].Timestamp, nil
}
// getCloudFormationFailures returns ResourceStatusReason(s)
// of events that should be failures based on regexp match of status
func getCloudFormationFailures(stackName *string, afterTime time.Time,
conn *cloudformation.CloudFormation) ([]string, error) {
var failures []string
// Only catching failures from last 100 events
// Some extra iteration logic via NextToken could be added
// but in reality it's nearly impossible to generate >100
// events by a single stack update
events, err := conn.DescribeStackEvents(&cloudformation.DescribeStackEventsInput{
StackName: stackName,
})
if err != nil {
return nil, err
}
failRe := regexp.MustCompile("_FAILED$")
rollbackRe := regexp.MustCompile("^ROLLBACK_")
for _, e := range events.StackEvents {
if (failRe.MatchString(*e.ResourceStatus) || rollbackRe.MatchString(*e.ResourceStatus)) &&
e.Timestamp.After(afterTime) && e.ResourceStatusReason != nil {
failures = append(failures, *e.ResourceStatusReason)
}
}
return failures, nil
}
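`normalizeJson`, used above as the `StateFunc` for `template_body` and `policy_body`, is defined elsewhere in the provider. A minimal sketch of what such a normalizer plausibly does — re-encode the JSON into a canonical compact form so semantically identical templates don't produce spurious diffs; illustrative only, not the provider's actual implementation:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// normalizeJson re-encodes a JSON document so formatting differences
// (whitespace, key spacing) compare equal in state. Illustrative sketch.
func normalizeJson(jsonString string) string {
	var v interface{}
	if err := json.Unmarshal([]byte(jsonString), &v); err != nil {
		// Leave unparseable input untouched rather than erroring here.
		return jsonString
	}
	b, _ := json.Marshal(v)
	return string(b)
}

func main() {
	fmt.Println(normalizeJson(`{ "Resources" : { } }`)) // {"Resources":{}}
}
```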

builtin/providers/aws/resource_aws_cloudformation_stack_test.go

@@ -0,0 +1,228 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudformation"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSCloudFormation_basic(t *testing.T) {
var stack cloudformation.Stack
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCloudFormationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudFormationConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudFormationStackExists("aws_cloudformation_stack.network", &stack),
),
},
},
})
}
func TestAccAWSCloudFormation_defaultParams(t *testing.T) {
var stack cloudformation.Stack
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCloudFormationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudFormationConfig_defaultParams,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudFormationStackExists("aws_cloudformation_stack.asg-demo", &stack),
),
},
},
})
}
func TestAccAWSCloudFormation_allAttributes(t *testing.T) {
var stack cloudformation.Stack
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCloudFormationDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudFormationConfig_allAttributes,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudFormationStackExists("aws_cloudformation_stack.full", &stack),
),
},
},
})
}
func testAccCheckCloudFormationStackExists(n string, stack *cloudformation.Stack) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
conn := testAccProvider.Meta().(*AWSClient).cfconn
params := &cloudformation.DescribeStacksInput{
StackName: aws.String(rs.Primary.ID),
}
resp, err := conn.DescribeStacks(params)
if err != nil {
return err
}
if len(resp.Stacks) == 0 {
return fmt.Errorf("CloudFormation stack not found")
}
return nil
}
}
func testAccCheckAWSCloudFormationDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).cfconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_cloudformation_stack" {
continue
}
params := cloudformation.DescribeStacksInput{
StackName: aws.String(rs.Primary.ID),
}
resp, err := conn.DescribeStacks(&params)
if err == nil {
if len(resp.Stacks) != 0 &&
*resp.Stacks[0].StackId == rs.Primary.ID {
return fmt.Errorf("CloudFormation stack still exists: %q", rs.Primary.ID)
}
}
}
return nil
}
var testAccAWSCloudFormationConfig = `
resource "aws_cloudformation_stack" "network" {
name = "tf-networking-stack"
template_body = <<STACK
{
"Resources" : {
"MyVPC": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : "10.0.0.0/16",
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
},
"Outputs" : {
"DefaultSgId" : {
"Description": "The ID of default security group",
"Value" : { "Fn::GetAtt" : [ "MyVPC", "DefaultSecurityGroup" ]}
},
"VpcID" : {
"Description": "The VPC ID",
"Value" : { "Ref" : "MyVPC" }
}
}
}
STACK
}`
var testAccAWSCloudFormationConfig_defaultParams = `
resource "aws_cloudformation_stack" "asg-demo" {
name = "tf-asg-demo-stack"
template_body = <<BODY
{
"Parameters": {
"TopicName": {
"Type": "String"
},
"VPCCIDR": {
"Type": "String",
"Default": "10.10.0.0/16"
}
},
"Resources": {
"NotificationTopic": {
"Type": "AWS::SNS::Topic",
"Properties": {
"TopicName": {
"Ref": "TopicName"
}
}
},
"MyVPC": {
"Type": "AWS::EC2::VPC",
"Properties": {
"CidrBlock": {
"Ref": "VPCCIDR"
},
"Tags": [
{
"Key": "Name",
"Value": "Primary_CF_VPC"
}
]
}
}
},
"Outputs": {
"VPCCIDR": {
"Value": {
"Ref": "VPCCIDR"
}
}
}
}
BODY
parameters {
TopicName = "ExampleTopic"
}
}
`
var testAccAWSCloudFormationConfig_allAttributes = `
resource "aws_cloudformation_stack" "full" {
name = "tf-full-stack"
template_body = <<STACK
{
"Resources" : {
"MyVPC": {
"Type" : "AWS::EC2::VPC",
"Properties" : {
"CidrBlock" : "10.0.0.0/16",
"Tags" : [
{"Key": "Name", "Value": "Primary_CF_VPC"}
]
}
}
}
}
STACK
capabilities = ["CAPABILITY_IAM"]
notification_arns = ["${aws_sns_topic.cf-updates.arn}"]
on_failure = "DELETE"
timeout_in_minutes = 1
}
resource "aws_sns_topic" "cf-updates" {
name = "tf-cf-notifications"
}
`

builtin/providers/aws/resource_aws_cloudtrail.go

@@ -0,0 +1,167 @@
package aws
import (
"fmt"
"log"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudtrail"
"github.com/hashicorp/terraform/helper/schema"
)
func resourceAwsCloudTrail() *schema.Resource {
return &schema.Resource{
Create: resourceAwsCloudTrailCreate,
Read: resourceAwsCloudTrailRead,
Update: resourceAwsCloudTrailUpdate,
Delete: resourceAwsCloudTrailDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"s3_bucket_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"s3_key_prefix": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"cloud_watch_logs_role_arn": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"cloud_watch_logs_group_arn": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"include_global_service_events": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
Default: true,
},
"sns_topic_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
}
}
func resourceAwsCloudTrailCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cloudtrailconn
input := cloudtrail.CreateTrailInput{
Name: aws.String(d.Get("name").(string)),
S3BucketName: aws.String(d.Get("s3_bucket_name").(string)),
}
if v, ok := d.GetOk("cloud_watch_logs_group_arn"); ok {
input.CloudWatchLogsLogGroupArn = aws.String(v.(string))
}
if v, ok := d.GetOk("cloud_watch_logs_role_arn"); ok {
input.CloudWatchLogsRoleArn = aws.String(v.(string))
}
if v, ok := d.GetOk("include_global_service_events"); ok {
input.IncludeGlobalServiceEvents = aws.Bool(v.(bool))
}
if v, ok := d.GetOk("s3_key_prefix"); ok {
input.S3KeyPrefix = aws.String(v.(string))
}
if v, ok := d.GetOk("sns_topic_name"); ok {
input.SnsTopicName = aws.String(v.(string))
}
t, err := conn.CreateTrail(&input)
if err != nil {
return err
}
log.Printf("[DEBUG] CloudTrail created: %s", t)
d.SetId(*t.Name)
return resourceAwsCloudTrailRead(d, meta)
}
func resourceAwsCloudTrailRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cloudtrailconn
name := d.Get("name").(string)
input := cloudtrail.DescribeTrailsInput{
TrailNameList: []*string{
aws.String(name),
},
}
resp, err := conn.DescribeTrails(&input)
if err != nil {
return err
}
if len(resp.TrailList) == 0 {
return fmt.Errorf("No CloudTrail found, using name %q", name)
}
trail := resp.TrailList[0]
log.Printf("[DEBUG] CloudTrail received: %s", trail)
d.Set("name", trail.Name)
d.Set("s3_bucket_name", trail.S3BucketName)
d.Set("s3_key_prefix", trail.S3KeyPrefix)
d.Set("cloud_watch_logs_role_arn", trail.CloudWatchLogsRoleArn)
d.Set("cloud_watch_logs_group_arn", trail.CloudWatchLogsLogGroupArn)
d.Set("include_global_service_events", trail.IncludeGlobalServiceEvents)
d.Set("sns_topic_name", trail.SnsTopicName)
return nil
}
func resourceAwsCloudTrailUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cloudtrailconn
input := cloudtrail.UpdateTrailInput{
Name: aws.String(d.Get("name").(string)),
}
if d.HasChange("s3_bucket_name") {
input.S3BucketName = aws.String(d.Get("s3_bucket_name").(string))
}
if d.HasChange("s3_key_prefix") {
input.S3KeyPrefix = aws.String(d.Get("s3_key_prefix").(string))
}
if d.HasChange("cloud_watch_logs_role_arn") {
input.CloudWatchLogsRoleArn = aws.String(d.Get("cloud_watch_logs_role_arn").(string))
}
if d.HasChange("cloud_watch_logs_group_arn") {
input.CloudWatchLogsLogGroupArn = aws.String(d.Get("cloud_watch_logs_group_arn").(string))
}
if d.HasChange("include_global_service_events") {
input.IncludeGlobalServiceEvents = aws.Bool(d.Get("include_global_service_events").(bool))
}
if d.HasChange("sns_topic_name") {
input.SnsTopicName = aws.String(d.Get("sns_topic_name").(string))
}
log.Printf("[DEBUG] Updating CloudTrail: %s", input)
t, err := conn.UpdateTrail(&input)
if err != nil {
return err
}
log.Printf("[DEBUG] CloudTrail updated: %s", t)
return resourceAwsCloudTrailRead(d, meta)
}
func resourceAwsCloudTrailDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).cloudtrailconn
name := d.Get("name").(string)
log.Printf("[DEBUG] Deleting CloudTrail: %q", name)
_, err := conn.DeleteTrail(&cloudtrail.DeleteTrailInput{
Name: aws.String(name),
})
return err
}

builtin/providers/aws/resource_aws_cloudtrail_test.go

@@ -0,0 +1,169 @@
package aws
import (
"fmt"
"math/rand"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudtrail"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSCloudTrail_basic(t *testing.T) {
var trail cloudtrail.Trail
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCloudTrailDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCloudTrailConfig,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail),
resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "true"),
),
},
resource.TestStep{
Config: testAccAWSCloudTrailConfigModified,
Check: resource.ComposeTestCheckFunc(
testAccCheckCloudTrailExists("aws_cloudtrail.foobar", &trail),
resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "s3_key_prefix", "/prefix"),
resource.TestCheckResourceAttr("aws_cloudtrail.foobar", "include_global_service_events", "false"),
),
},
},
})
}
func testAccCheckCloudTrailExists(n string, trail *cloudtrail.Trail) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn
params := cloudtrail.DescribeTrailsInput{
TrailNameList: []*string{aws.String(rs.Primary.ID)},
}
resp, err := conn.DescribeTrails(&params)
if err != nil {
return err
}
if len(resp.TrailList) == 0 {
return fmt.Errorf("Trail not found")
}
*trail = *resp.TrailList[0]
return nil
}
}
func testAccCheckAWSCloudTrailDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).cloudtrailconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_cloudtrail" {
continue
}
params := cloudtrail.DescribeTrailsInput{
TrailNameList: []*string{aws.String(rs.Primary.ID)},
}
resp, err := conn.DescribeTrails(&params)
if err == nil {
if len(resp.TrailList) != 0 &&
*resp.TrailList[0].Name == rs.Primary.ID {
return fmt.Errorf("CloudTrail still exists: %s", rs.Primary.ID)
}
}
}
return nil
}
var cloudTrailRandInt = rand.New(rand.NewSource(time.Now().UnixNano())).Int()
var testAccAWSCloudTrailConfig = fmt.Sprintf(`
resource "aws_cloudtrail" "foobar" {
name = "tf-trail-foobar"
s3_bucket_name = "${aws_s3_bucket.foo.id}"
}
resource "aws_s3_bucket" "foo" {
bucket = "tf-test-trail-%d"
force_destroy = true
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::tf-test-trail-%d"
},
{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::tf-test-trail-%d/*",
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
POLICY
}
`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)
var testAccAWSCloudTrailConfigModified = fmt.Sprintf(`
resource "aws_cloudtrail" "foobar" {
name = "tf-trail-foobar"
s3_bucket_name = "${aws_s3_bucket.foo.id}"
s3_key_prefix = "/prefix"
include_global_service_events = false
}
resource "aws_s3_bucket" "foo" {
bucket = "tf-test-trail-%d"
force_destroy = true
policy = <<POLICY
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "AWSCloudTrailAclCheck",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:GetBucketAcl",
"Resource": "arn:aws:s3:::tf-test-trail-%d"
},
{
"Sid": "AWSCloudTrailWrite",
"Effect": "Allow",
"Principal": "*",
"Action": "s3:PutObject",
"Resource": "arn:aws:s3:::tf-test-trail-%d/*",
"Condition": {
"StringEquals": {
"s3:x-amz-acl": "bucket-owner-full-control"
}
}
}
]
}
POLICY
}
`, cloudTrailRandInt, cloudTrailRandInt, cloudTrailRandInt)

builtin/providers/aws/resource_aws_codedeploy_app.go

@@ -0,0 +1,127 @@
package aws
import (
"fmt"
"log"
"strings"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy"
)
func resourceAwsCodeDeployApp() *schema.Resource {
return &schema.Resource{
Create: resourceAwsCodeDeployAppCreate,
Read: resourceAwsCodeDeployAppRead,
Update: resourceAwsCodeDeployUpdate,
Delete: resourceAwsCodeDeployAppDelete,
Schema: map[string]*schema.Schema{
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
// The unique ID is set by AWS on create.
"unique_id": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Computed: true,
},
},
}
}
func resourceAwsCodeDeployAppCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
application := d.Get("name").(string)
log.Printf("[DEBUG] Creating CodeDeploy application %s", application)
resp, err := conn.CreateApplication(&codedeploy.CreateApplicationInput{
ApplicationName: aws.String(application),
})
if err != nil {
return err
}
log.Printf("[DEBUG] CodeDeploy application %s created", *resp.ApplicationId)
// Despite giving the application a unique ID, AWS doesn't actually use
// it in API calls. Use it and the app name to identify the resource in
// the state file. This allows us to reliably detect both when the TF
// config file changes and when the user deletes the app without removing
// it first from the TF config.
d.SetId(fmt.Sprintf("%s:%s", *resp.ApplicationId, application))
return resourceAwsCodeDeployAppRead(d, meta)
}
func resourceAwsCodeDeployAppRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
_, application := resourceAwsCodeDeployAppParseId(d.Id())
log.Printf("[DEBUG] Reading CodeDeploy application %s", application)
resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{
ApplicationName: aws.String(application),
})
if err != nil {
if codedeployerr, ok := err.(awserr.Error); ok && codedeployerr.Code() == "ApplicationDoesNotExistException" {
d.SetId("")
return nil
} else {
log.Printf("[ERROR] Error finding CodeDeploy application: %s", err)
return err
}
}
d.Set("name", *resp.Application.ApplicationName)
return nil
}
func resourceAwsCodeDeployUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
o, n := d.GetChange("name")
_, err := conn.UpdateApplication(&codedeploy.UpdateApplicationInput{
ApplicationName: aws.String(o.(string)),
NewApplicationName: aws.String(n.(string)),
})
if err != nil {
return err
}
log.Printf("[DEBUG] CodeDeploy application %s updated", n)
d.Set("name", n)
return nil
}
func resourceAwsCodeDeployAppDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
_, err := conn.DeleteApplication(&codedeploy.DeleteApplicationInput{
ApplicationName: aws.String(d.Get("name").(string)),
})
if err != nil {
if cderr, ok := err.(awserr.Error); ok && cderr.Code() == "InvalidApplicationNameException" {
d.SetId("")
return nil
} else {
log.Printf("[ERROR] Error deleting CodeDeploy application: %s", err)
return err
}
}
return nil
}
func resourceAwsCodeDeployAppParseId(id string) (string, string) {
parts := strings.SplitN(id, ":", 2)
return parts[0], parts[1]
}
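Create stores the composite `<unique-id>:<name>` ID that `resourceAwsCodeDeployAppParseId` splits back apart. A standalone sketch of the round trip, with made-up values:

```go
package main

import (
	"fmt"
	"strings"
)

// parseId mirrors resourceAwsCodeDeployAppParseId: split the composite
// state ID into the AWS-assigned unique ID and the application name.
func parseId(id string) (uniqueId, appName string) {
	parts := strings.SplitN(id, ":", 2)
	return parts[0], parts[1]
}

func main() {
	// The values are made up; Create formats the real ID the same way.
	id := fmt.Sprintf("%s:%s", "e96e1b5f-example", "foo_app")
	uid, name := parseId(id)
	fmt.Println(uid, name) // e96e1b5f-example foo_app
}
```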

builtin/providers/aws/resource_aws_codedeploy_app_test.go

@@ -0,0 +1,78 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSCodeDeployApp_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCodeDeployAppDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCodeDeployApp,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"),
),
},
resource.TestStep{
Config: testAccAWSCodeDeployAppModifier,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeDeployAppExists("aws_codedeploy_app.foo"),
),
},
},
})
}
func testAccCheckAWSCodeDeployAppDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).codedeployconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_codedeploy_app" {
continue
}
resp, err := conn.GetApplication(&codedeploy.GetApplicationInput{
ApplicationName: aws.String(rs.Primary.ID),
})
if err == nil {
if resp.Application != nil {
return fmt.Errorf("CodeDeploy app still exists:\n%#v", *resp.Application.ApplicationId)
}
}
return err
}
return nil
}
func testAccCheckAWSCodeDeployAppExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
return nil
}
}
var testAccAWSCodeDeployApp = `
resource "aws_codedeploy_app" "foo" {
name = "foo"
}`
var testAccAWSCodeDeployAppModifier = `
resource "aws_codedeploy_app" "foo" {
name = "bar"
}`

builtin/providers/aws/resource_aws_codedeploy_deployment_group.go

@@ -0,0 +1,375 @@
package aws
import (
"bytes"
"fmt"
"log"
"time"
"github.com/hashicorp/terraform/helper/hashcode"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/helper/schema"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/service/codedeploy"
)
func resourceAwsCodeDeployDeploymentGroup() *schema.Resource {
return &schema.Resource{
Create: resourceAwsCodeDeployDeploymentGroupCreate,
Read: resourceAwsCodeDeployDeploymentGroupRead,
Update: resourceAwsCodeDeployDeploymentGroupUpdate,
Delete: resourceAwsCodeDeployDeploymentGroupDelete,
Schema: map[string]*schema.Schema{
"app_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 100 {
errors = append(errors, fmt.Errorf(
"%q cannot exceed 100 characters", k))
}
return
},
},
"deployment_group_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 100 {
errors = append(errors, fmt.Errorf(
"%q cannot exceed 100 characters", k))
}
return
},
},
"service_role_arn": &schema.Schema{
Type: schema.TypeString,
Required: true,
},
"autoscaling_groups": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
Set: schema.HashString,
},
"deployment_config_name": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "CodeDeployDefault.OneAtATime",
ValidateFunc: func(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if len(value) > 100 {
errors = append(errors, fmt.Errorf(
"%q cannot exceed 100 characters", k))
}
return
},
},
"ec2_tag_filter": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateTagFilters,
},
"value": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
Set: resourceAwsCodeDeployTagFilterHash,
},
"on_premises_instance_tag_filter": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"type": &schema.Schema{
Type: schema.TypeString,
Optional: true,
ValidateFunc: validateTagFilters,
},
"value": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
},
},
Set: resourceAwsCodeDeployTagFilterHash,
},
},
}
}
func resourceAwsCodeDeployDeploymentGroupCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
application := d.Get("app_name").(string)
deploymentGroup := d.Get("deployment_group_name").(string)
input := codedeploy.CreateDeploymentGroupInput{
ApplicationName: aws.String(application),
DeploymentGroupName: aws.String(deploymentGroup),
ServiceRoleArn: aws.String(d.Get("service_role_arn").(string)),
}
if attr, ok := d.GetOk("deployment_config_name"); ok {
input.DeploymentConfigName = aws.String(attr.(string))
}
if attr, ok := d.GetOk("autoscaling_groups"); ok {
input.AutoScalingGroups = expandStringList(attr.(*schema.Set).List())
}
if attr, ok := d.GetOk("on_premises_instance_tag_filters"); ok {
onPremFilters := buildOnPremTagFilters(attr.(*schema.Set).List())
input.OnPremisesInstanceTagFilters = onPremFilters
}
if attr, ok := d.GetOk("ec2_tag_filter"); ok {
ec2TagFilters := buildEC2TagFilters(attr.(*schema.Set).List())
input.Ec2TagFilters = ec2TagFilters
}
// Retry to handle IAM role eventual consistency.
var resp *codedeploy.CreateDeploymentGroupOutput
var err error
err = resource.Retry(2*time.Minute, func() error {
resp, err = conn.CreateDeploymentGroup(&input)
if err != nil {
codedeployErr, ok := err.(awserr.Error)
if !ok {
return &resource.RetryError{Err: err}
}
if codedeployErr.Code() == "InvalidRoleException" {
log.Printf("[DEBUG] Trying to create deployment group again: %q",
codedeployErr.Message())
return err
}
return &resource.RetryError{Err: err}
}
return nil
})
if err != nil {
return err
}
d.SetId(*resp.DeploymentGroupId)
return resourceAwsCodeDeployDeploymentGroupRead(d, meta)
}
func resourceAwsCodeDeployDeploymentGroupRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
log.Printf("[DEBUG] Reading CodeDeploy DeploymentGroup %s", d.Id())
resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{
ApplicationName: aws.String(d.Get("app_name").(string)),
DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)),
})
if err != nil {
return err
}
d.Set("app_name", *resp.DeploymentGroupInfo.ApplicationName)
d.Set("autoscaling_groups", resp.DeploymentGroupInfo.AutoScalingGroups)
d.Set("deployment_config_name", *resp.DeploymentGroupInfo.DeploymentConfigName)
d.Set("deployment_group_name", *resp.DeploymentGroupInfo.DeploymentGroupName)
d.Set("service_role_arn", *resp.DeploymentGroupInfo.ServiceRoleArn)
if err := d.Set("ec2_tag_filter", ec2TagFiltersToMap(resp.DeploymentGroupInfo.Ec2TagFilters)); err != nil {
return err
}
if err := d.Set("on_premises_instance_tag_filter", onPremisesTagFiltersToMap(resp.DeploymentGroupInfo.OnPremisesInstanceTagFilters)); err != nil {
return err
}
return nil
}
func resourceAwsCodeDeployDeploymentGroupUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
input := codedeploy.UpdateDeploymentGroupInput{
ApplicationName: aws.String(d.Get("app_name").(string)),
CurrentDeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)),
}
if d.HasChange("autoscaling_groups") {
_, n := d.GetChange("autoscaling_groups")
input.AutoScalingGroups = expandStringList(n.(*schema.Set).List())
}
if d.HasChange("deployment_config_name") {
_, n := d.GetChange("deployment_config_name")
input.DeploymentConfigName = aws.String(n.(string))
}
if d.HasChange("deployment_group_name") {
_, n := d.GetChange("deployment_group_name")
input.NewDeploymentGroupName = aws.String(n.(string))
}
// TagFilters aren't like tags. They don't append. They simply replace.
if d.HasChange("on_premises_instance_tag_filter") {
_, n := d.GetChange("on_premises_instance_tag_filter")
onPremFilters := buildOnPremTagFilters(n.(*schema.Set).List())
input.OnPremisesInstanceTagFilters = onPremFilters
}
if d.HasChange("ec2_tag_filter") {
_, n := d.GetChange("ec2_tag_filter")
ec2Filters := buildEC2TagFilters(n.(*schema.Set).List())
input.Ec2TagFilters = ec2Filters
}
log.Printf("[DEBUG] Updating CodeDeploy DeploymentGroup %s", d.Id())
_, err := conn.UpdateDeploymentGroup(&input)
if err != nil {
return err
}
return resourceAwsCodeDeployDeploymentGroupRead(d, meta)
}
func resourceAwsCodeDeployDeploymentGroupDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*AWSClient).codedeployconn
log.Printf("[DEBUG] Deleting CodeDeploy DeploymentGroup %s", d.Id())
_, err := conn.DeleteDeploymentGroup(&codedeploy.DeleteDeploymentGroupInput{
ApplicationName: aws.String(d.Get("app_name").(string)),
DeploymentGroupName: aws.String(d.Get("deployment_group_name").(string)),
})
if err != nil {
return err
}
d.SetId("")
return nil
}
// buildOnPremTagFilters converts raw schema lists into a list of
// codedeploy.TagFilters.
func buildOnPremTagFilters(configured []interface{}) []*codedeploy.TagFilter {
filters := make([]*codedeploy.TagFilter, 0)
for _, raw := range configured {
var filter codedeploy.TagFilter
m := raw.(map[string]interface{})
filter.Key = aws.String(m["key"].(string))
filter.Type = aws.String(m["type"].(string))
filter.Value = aws.String(m["value"].(string))
filters = append(filters, &filter)
}
return filters
}
// buildEC2TagFilters converts raw schema lists into a list of
// codedeploy.EC2TagFilters.
func buildEC2TagFilters(configured []interface{}) []*codedeploy.EC2TagFilter {
filters := make([]*codedeploy.EC2TagFilter, 0)
for _, raw := range configured {
var filter codedeploy.EC2TagFilter
m := raw.(map[string]interface{})
filter.Key = aws.String(m["key"].(string))
filter.Type = aws.String(m["type"].(string))
filter.Value = aws.String(m["value"].(string))
filters = append(filters, &filter)
}
return filters
}
// ec2TagFiltersToMap converts lists of tag filters into a []map[string]string.
func ec2TagFiltersToMap(list []*codedeploy.EC2TagFilter) []map[string]string {
result := make([]map[string]string, 0, len(list))
for _, tf := range list {
l := make(map[string]string)
if *tf.Key != "" {
l["key"] = *tf.Key
}
if *tf.Value != "" {
l["value"] = *tf.Value
}
if *tf.Type != "" {
l["type"] = *tf.Type
}
result = append(result, l)
}
return result
}
// onPremisesTagFiltersToMap converts lists of on-prem tag filters into a []map[string]string.
func onPremisesTagFiltersToMap(list []*codedeploy.TagFilter) []map[string]string {
result := make([]map[string]string, 0, len(list))
for _, tf := range list {
l := make(map[string]string)
if *tf.Key != "" {
l["key"] = *tf.Key
}
if *tf.Value != "" {
l["value"] = *tf.Value
}
if *tf.Type != "" {
l["type"] = *tf.Type
}
result = append(result, l)
}
return result
}
// validateTagFilters confirms the "value" component of a tag filter is one of
// AWS's three allowed types.
func validateTagFilters(v interface{}, k string) (ws []string, errors []error) {
value := v.(string)
if value != "KEY_ONLY" && value != "VALUE_ONLY" && value != "KEY_AND_VALUE" {
errors = append(errors, fmt.Errorf(
"%q must be one of \"KEY_ONLY\", \"VALUE_ONLY\", or \"KEY_AND_VALUE\"", k))
}
return
}
func resourceAwsCodeDeployTagFilterHash(v interface{}) int {
var buf bytes.Buffer
m := v.(map[string]interface{})
// Nothing's actually required in tag filters, so we must check the
// presence of all values before attempting a hash.
if v, ok := m["key"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["type"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
if v, ok := m["value"]; ok {
buf.WriteString(fmt.Sprintf("%s-", v.(string)))
}
return hashcode.String(buf.String())
}
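`hashcode.String` isn't shown in this commit; assuming a CRC32-style string hash, here is a standalone sketch of the set-hash behavior of `resourceAwsCodeDeployTagFilterHash`: equal filters always serialize to the same `key-type-value-` string and therefore land on the same set element. The CRC32 stand-in for `helper/hashcode` is an assumption for illustration:

```go
package main

import (
	"bytes"
	"fmt"
	"hash/crc32"
)

// tagFilterHash mirrors resourceAwsCodeDeployTagFilterHash: concatenate
// whichever optional fields are present, in a fixed order, then hash.
func tagFilterHash(m map[string]interface{}) int {
	var buf bytes.Buffer
	for _, k := range []string{"key", "type", "value"} {
		if v, ok := m[k]; ok {
			buf.WriteString(fmt.Sprintf("%s-", v.(string)))
		}
	}
	return int(crc32.ChecksumIEEE(buf.Bytes()))
}

func main() {
	f := map[string]interface{}{"key": "filterkey", "type": "KEY_AND_VALUE", "value": "filtervalue"}
	fmt.Println(tagFilterHash(f))
}
```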

builtin/providers/aws/resource_aws_codedeploy_deployment_group_test.go

@@ -0,0 +1,199 @@
package aws
import (
"fmt"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/codedeploy"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccAWSCodeDeployDeploymentGroup_basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSCodeDeployDeploymentGroupDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSCodeDeployDeploymentGroup,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"),
),
},
resource.TestStep{
Config: testAccAWSCodeDeployDeploymentGroupModifier,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSCodeDeployDeploymentGroupExists("aws_codedeploy_deployment_group.foo"),
),
},
},
})
}
func testAccCheckAWSCodeDeployDeploymentGroupDestroy(s *terraform.State) error {
conn := testAccProvider.Meta().(*AWSClient).codedeployconn
for _, rs := range s.RootModule().Resources {
if rs.Type != "aws_codedeploy_deployment_group" {
continue
}
resp, err := conn.GetDeploymentGroup(&codedeploy.GetDeploymentGroupInput{
ApplicationName: aws.String(rs.Primary.Attributes["app_name"]),
DeploymentGroupName: aws.String(rs.Primary.Attributes["deployment_group_name"]),
})
if err == nil {
if resp.DeploymentGroupInfo.DeploymentGroupName != nil {
return fmt.Errorf("CodeDeploy deployment group still exists:\n%#v", *resp.DeploymentGroupInfo.DeploymentGroupName)
}
}
return err
}
return nil
}
func testAccCheckAWSCodeDeployDeploymentGroupExists(name string) resource.TestCheckFunc {
return func(s *terraform.State) error {
_, ok := s.RootModule().Resources[name]
if !ok {
return fmt.Errorf("Not found: %s", name)
}
return nil
}
}
var testAccAWSCodeDeployDeploymentGroup = `
resource "aws_codedeploy_app" "foo_app" {
name = "foo_app"
}
resource "aws_iam_role_policy" "foo_policy" {
name = "foo_policy"
role = "${aws_iam_role.foo_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:DeleteLifecycleHook",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLifecycleHooks",
"autoscaling:PutLifecycleHook",
"autoscaling:RecordLifecycleActionHeartbeat",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"tag:GetTags",
"tag:GetResources"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_role" "foo_role" {
name = "foo_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"codedeploy.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_codedeploy_deployment_group" "foo" {
app_name = "${aws_codedeploy_app.foo_app.name}"
deployment_group_name = "foo"
service_role_arn = "${aws_iam_role.foo_role.arn}"
ec2_tag_filter {
key = "filterkey"
type = "KEY_AND_VALUE"
value = "filtervalue"
}
}`
var testAccAWSCodeDeployDeploymentGroupModifier = `
resource "aws_codedeploy_app" "foo_app" {
name = "foo_app"
}
resource "aws_iam_role_policy" "foo_policy" {
name = "foo_policy"
role = "${aws_iam_role.foo_role.id}"
policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Effect": "Allow",
"Action": [
"autoscaling:CompleteLifecycleAction",
"autoscaling:DeleteLifecycleHook",
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeLifecycleHooks",
"autoscaling:PutLifecycleHook",
"autoscaling:RecordLifecycleActionHeartbeat",
"ec2:DescribeInstances",
"ec2:DescribeInstanceStatus",
"tag:GetTags",
"tag:GetResources"
],
"Resource": "*"
}
]
}
EOF
}
resource "aws_iam_role" "foo_role" {
name = "foo_role"
assume_role_policy = <<EOF
{
"Version": "2012-10-17",
"Statement": [
{
"Sid": "",
"Effect": "Allow",
"Principal": {
"Service": [
"codedeploy.amazonaws.com"
]
},
"Action": "sts:AssumeRole"
}
]
}
EOF
}
resource "aws_codedeploy_deployment_group" "foo" {
app_name = "${aws_codedeploy_app.foo_app.name}"
deployment_group_name = "bar"
service_role_arn = "${aws_iam_role.foo_role.arn}"
ec2_tag_filter {
key = "filterkey"
type = "KEY_AND_VALUE"
value = "filtervalue"
}
}`

View File

@@ -575,14 +575,23 @@ func resourceAwsDynamoDbTableRead(d *schema.ResourceData, meta interface{}) error {
			}
		}
-		gsi["projection_type"] = *gsiObject.Projection.ProjectionType
		gsi["projection_type"] = *(gsiObject.Projection.ProjectionType)
-		gsi["non_key_attributes"] = gsiObject.Projection.NonKeyAttributes
		nonKeyAttrs := make([]string, 0, len(gsiObject.Projection.NonKeyAttributes))
		for _, nonKeyAttr := range gsiObject.Projection.NonKeyAttributes {
			nonKeyAttrs = append(nonKeyAttrs, *nonKeyAttr)
		}
		gsi["non_key_attributes"] = nonKeyAttrs

		gsiList = append(gsiList, gsi)
		log.Printf("[DEBUG] Added GSI: %s - Read: %d / Write: %d", gsi["name"], gsi["read_capacity"], gsi["write_capacity"])
	}

-	d.Set("global_secondary_index", gsiList)
	err = d.Set("global_secondary_index", gsiList)
	if err != nil {
		return err
	}

	d.Set("arn", table.TableArn)

	return nil
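The fix above boils down to dereferencing a []*string before it goes into state. A minimal sketch of the same flattening outside the provider (aws-sdk-go also ships aws.StringValueSlice for this, assuming the vendored SDK version carries it):

package main

import "fmt"

// flattenStringPtrs dereferences each element, since a []*string from
// the AWS SDK cannot be stored in Terraform state directly.
func flattenStringPtrs(in []*string) []string {
	out := make([]string, 0, len(in))
	for _, p := range in {
		out = append(out, *p)
	}
	return out
}

func main() {
	a, b := "attr1", "attr2"
	fmt.Println(flattenStringPtrs([]*string{&a, &b})) // [attr1 attr2]
}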

View File

@@ -137,7 +137,6 @@ func resourceAwsEcsServiceCreate(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[DEBUG] ECS service created: %s", *service.ServiceArn)
	d.SetId(*service.ServiceArn)
-	d.Set("cluster", *service.ClusterArn)

	return resourceAwsEcsServiceUpdate(d, meta)
}

@@ -175,14 +174,21 @@ func resourceAwsEcsServiceRead(d *schema.ResourceData, meta interface{}) error {
	}
	d.Set("desired_count", *service.DesiredCount)

	// Save cluster in the same format
	if strings.HasPrefix(d.Get("cluster").(string), "arn:aws:ecs:") {
		d.Set("cluster", *service.ClusterArn)
	} else {
		clusterARN := getNameFromARN(*service.ClusterArn)
		d.Set("cluster", clusterARN)
	}

	// Save IAM role in the same format
	if service.RoleArn != nil {
		if strings.HasPrefix(d.Get("iam_role").(string), "arn:aws:iam:") {
			d.Set("iam_role", *service.RoleArn)
		} else {
-			roleARN := buildIamRoleNameFromARN(*service.RoleArn)
			roleARN := getNameFromARN(*service.RoleArn)
			d.Set("iam_role", roleARN)
		}
	}

@@ -306,8 +312,10 @@ func buildFamilyAndRevisionFromARN(arn string) string {
	return strings.Split(arn, "/")[1]
}

-func buildIamRoleNameFromARN(arn string) string {
// Expects the following ARNs:
// arn:aws:iam::0123456789:role/EcsService
// arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster
func getNameFromARN(arn string) string {
	return strings.Split(arn, "/")[1]
}
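The renamed helper simply keeps whatever follows the first "/", which works for both of the ARN shapes in its comment. A runnable sketch of that contract (note it would panic on the index for an ARN without a slash):

package main

import (
	"fmt"
	"strings"
)

func getNameFromARN(arn string) string {
	return strings.Split(arn, "/")[1]
}

func main() {
	fmt.Println(getNameFromARN("arn:aws:iam::0123456789:role/EcsService"))               // EcsService
	fmt.Println(getNameFromARN("arn:aws:ecs:us-west-2:0123456789:cluster/radek-cluster")) // radek-cluster
}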

View File

@@ -178,6 +178,26 @@ func TestAccAWSEcsService_withIamRole(t *testing.T) {
	})
}
// Regression for https://github.com/hashicorp/terraform/issues/3361
func TestAccAWSEcsService_withEcsClusterName(t *testing.T) {
clusterName := regexp.MustCompile("^terraformecstestcluster$")
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSEcsServiceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSEcsServiceWithEcsClusterName,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSEcsServiceExists("aws_ecs_service.jenkins"),
resource.TestMatchResourceAttr(
"aws_ecs_service.jenkins", "cluster", clusterName),
),
},
},
})
}
func testAccCheckAWSEcsServiceDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).ecsconn

@@ -471,3 +491,31 @@ resource "aws_ecs_service" "ghost" {
  desired_count = 1
}
`
var testAccAWSEcsServiceWithEcsClusterName = `
resource "aws_ecs_cluster" "default" {
name = "terraformecstestcluster"
}
resource "aws_ecs_task_definition" "jenkins" {
family = "jenkins"
container_definitions = <<DEFINITION
[
{
"cpu": 128,
"essential": true,
"image": "jenkins:latest",
"memory": 128,
"name": "jenkins"
}
]
DEFINITION
}
resource "aws_ecs_service" "jenkins" {
name = "jenkins"
cluster = "${aws_ecs_cluster.default.name}"
task_definition = "${aws_ecs_task_definition.jenkins.arn}"
desired_count = 1
}
`

View File

@@ -118,7 +118,10 @@ func resourceAwsElasticacheCluster() *schema.Resource {
				},
			},
		},

		"notification_topic_arn": &schema.Schema{
			Type:     schema.TypeString,
			Optional: true,
		},

		// A single-element string list containing an Amazon Resource Name (ARN) that
		// uniquely identifies a Redis RDB snapshot file stored in Amazon S3. The snapshot
		// file will be used to populate the node group.

@@ -188,6 +191,10 @@ func resourceAwsElasticacheClusterCreate(d *schema.ResourceData, meta interface{}) error {
		req.PreferredMaintenanceWindow = aws.String(v.(string))
	}

	if v, ok := d.GetOk("notification_topic_arn"); ok {
		req.NotificationTopicArn = aws.String(v.(string))
	}

	snaps := d.Get("snapshot_arns").(*schema.Set).List()
	if len(snaps) > 0 {
		s := expandStringList(snaps)

@@ -254,6 +261,11 @@ func resourceAwsElasticacheClusterRead(d *schema.ResourceData, meta interface{}) error {
	d.Set("security_group_ids", c.SecurityGroups)
	d.Set("parameter_group_name", c.CacheParameterGroup)
	d.Set("maintenance_window", c.PreferredMaintenanceWindow)
	if c.NotificationConfiguration != nil {
		if *c.NotificationConfiguration.TopicStatus == "active" {
			d.Set("notification_topic_arn", c.NotificationConfiguration.TopicArn)
		}
	}

	if err := setCacheNodeData(d, c); err != nil {
		return err

@@ -317,6 +329,16 @@ func resourceAwsElasticacheClusterUpdate(d *schema.ResourceData, meta interface{}) error {
		requestUpdate = true
	}

	if d.HasChange("notification_topic_arn") {
		v := d.Get("notification_topic_arn").(string)
		req.NotificationTopicArn = aws.String(v)
		if v == "" {
			inactive := "inactive"
			req.NotificationTopicStatus = &inactive
		}
		requestUpdate = true
	}

	if d.HasChange("engine_version") {
		req.EngineVersion = aws.String(d.Get("engine_version").(string))
		requestUpdate = true
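A minimal sketch of the clearing rule above, using a stand-in struct rather than the real elasticache request type: when the ARN is emptied out, the update sends the empty ARN together with an explicit "inactive" topic status, which is how this code signals that the topic should be detached.

package main

import "fmt"

// modifyReq is a hypothetical stand-in for the ModifyCacheCluster input.
type modifyReq struct {
	NotificationTopicArn    *string
	NotificationTopicStatus *string
}

func applyTopicChange(req *modifyReq, arn string) {
	req.NotificationTopicArn = &arn
	if arn == "" {
		inactive := "inactive"
		req.NotificationTopicStatus = &inactive
	}
}

func main() {
	var req modifyReq
	applyTopicChange(&req, "") // notification_topic_arn removed from config
	fmt.Println(*req.NotificationTopicArn == "", *req.NotificationTopicStatus) // true inactive
}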

View File

@@ -3,6 +3,7 @@ package aws
import (
	"fmt"
	"math/rand"
	"strings"
	"testing"
	"time"

@@ -13,6 +14,7 @@ import (
)

func TestAccAWSElasticacheCluster_basic(t *testing.T) {
	var ec elasticache.CacheCluster
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,

@@ -22,7 +24,7 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
			Config: testAccAWSElasticacheClusterConfig,
			Check: resource.ComposeTestCheckFunc(
				testAccCheckAWSElasticacheSecurityGroupExists("aws_elasticache_security_group.bar"),
-				testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar"),
				testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
				resource.TestCheckResourceAttr(
					"aws_elasticache_cluster.bar", "cache_nodes.0.id", "0001"),
			),

@@ -33,6 +35,7 @@ func TestAccAWSElasticacheCluster_basic(t *testing.T) {
func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
	var csg elasticache.CacheSubnetGroup
	var ec elasticache.CacheCluster
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,

@@ -42,13 +45,28 @@ func TestAccAWSElasticacheCluster_vpc(t *testing.T) {
			Config: testAccAWSElasticacheClusterInVPCConfig,
			Check: resource.ComposeTestCheckFunc(
				testAccCheckAWSElasticacheSubnetGroupExists("aws_elasticache_subnet_group.bar", &csg),
-				testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar"),
				testAccCheckAWSElasticacheClusterExists("aws_elasticache_cluster.bar", &ec),
				testAccCheckAWSElasticacheClusterAttributes(&ec),
			),
		},
		},
	})
}
func testAccCheckAWSElasticacheClusterAttributes(v *elasticache.CacheCluster) resource.TestCheckFunc {
return func(s *terraform.State) error {
if v.NotificationConfiguration == nil {
return fmt.Errorf("Expected NotificationConfiguration for ElastiCache Cluster (%s)", *v.CacheClusterId)
}
if strings.ToLower(*v.NotificationConfiguration.TopicStatus) != "active" {
return fmt.Errorf("Expected NotificationConfiguration status to be 'active', got (%s)", *v.NotificationConfiguration.TopicStatus)
}
return nil
}
}
func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).elasticacheconn

@@ -69,7 +87,7 @@ func testAccCheckAWSElasticacheClusterDestroy(s *terraform.State) error {
	return nil
}

-func testAccCheckAWSElasticacheClusterExists(n string) resource.TestCheckFunc {
func testAccCheckAWSElasticacheClusterExists(n string, v *elasticache.CacheCluster) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {

@@ -81,12 +99,19 @@ func testAccCheckAWSElasticacheClusterExists(n string) resource.TestCheckFunc {
		}

		conn := testAccProvider.Meta().(*AWSClient).elasticacheconn
-		_, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
		resp, err := conn.DescribeCacheClusters(&elasticache.DescribeCacheClustersInput{
			CacheClusterId: aws.String(rs.Primary.ID),
		})
		if err != nil {
			return fmt.Errorf("Elasticache error: %v", err)
		}

		for _, c := range resp.CacheClusters {
			if *c.CacheClusterId == rs.Primary.ID {
				*v = *c
			}
		}

		return nil
	}
}

@@ -175,5 +200,10 @@ resource "aws_elasticache_cluster" "bar" {
    subnet_group_name = "${aws_elasticache_subnet_group.bar.name}"
    security_group_ids = ["${aws_security_group.bar.id}"]
    parameter_group_name = "default.redis2.8"
    notification_topic_arn = "${aws_sns_topic.topic_example.arn}"
}

resource "aws_sns_topic" "topic_example" {
    name = "tf-ecache-cluster-test"
}
`, genRandInt(), genRandInt(), genRandInt())

View File

@@ -41,6 +41,39 @@ func resourceAwsS3Bucket() *schema.Resource {
				StateFunc: normalizeJson,
			},
"cors_rule": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"allowed_headers": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"allowed_methods": &schema.Schema{
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"allowed_origins": &schema.Schema{
Type: schema.TypeList,
Required: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"expose_headers": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Schema{Type: schema.TypeString},
},
"max_age_seconds": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
},
},
},
},
"website": &schema.Schema{ "website": &schema.Schema{
Type: schema.TypeList, Type: schema.TypeList,
Optional: true, Optional: true,
@ -168,6 +201,12 @@ func resourceAwsS3BucketUpdate(d *schema.ResourceData, meta interface{}) error {
} }
} }
if d.HasChange("cors_rule") {
if err := resourceAwsS3BucketCorsUpdate(s3conn, d); err != nil {
return err
}
}
if d.HasChange("website") { if d.HasChange("website") {
if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil { if err := resourceAwsS3BucketWebsiteUpdate(s3conn, d); err != nil {
return err return err
@ -221,6 +260,27 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error {
} }
} }
// Read the CORS
cors, err := s3conn.GetBucketCors(&s3.GetBucketCorsInput{
Bucket: aws.String(d.Id()),
})
log.Printf("[DEBUG] S3 bucket: %s, read CORS: %v", d.Id(), cors)
if err == nil {
rules := make([]map[string]interface{}, 0, len(cors.CORSRules))
for _, ruleObject := range cors.CORSRules {
rule := make(map[string]interface{})
rule["allowed_headers"] = ruleObject.AllowedHeaders
rule["allowed_methods"] = ruleObject.AllowedMethods
rule["allowed_origins"] = ruleObject.AllowedOrigins
rule["expose_headers"] = ruleObject.ExposeHeaders
rule["max_age_seconds"] = ruleObject.MaxAgeSeconds
rules = append(rules, rule)
}
if err := d.Set("cors_rule", rules); err != nil {
return fmt.Errorf("error reading S3 bucket \"%s\" CORS rules: %s", d.Id(), err)
}
}
	// Read the website configuration
	ws, err := s3conn.GetBucketWebsite(&s3.GetBucketWebsiteInput{
		Bucket: aws.String(d.Id()),

@@ -400,6 +460,65 @@ func resourceAwsS3BucketPolicyUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	return nil
}
func resourceAwsS3BucketCorsUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
bucket := d.Get("bucket").(string)
rawCors := d.Get("cors_rule").([]interface{})
if len(rawCors) == 0 {
// Delete CORS
log.Printf("[DEBUG] S3 bucket: %s, delete CORS", bucket)
_, err := s3conn.DeleteBucketCors(&s3.DeleteBucketCorsInput{
Bucket: aws.String(bucket),
})
if err != nil {
return fmt.Errorf("Error deleting S3 CORS: %s", err)
}
} else {
// Put CORS
rules := make([]*s3.CORSRule, 0, len(rawCors))
for _, cors := range rawCors {
corsMap := cors.(map[string]interface{})
r := &s3.CORSRule{}
for k, v := range corsMap {
log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v, %#v", bucket, k, v)
if k == "max_age_seconds" {
r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
} else {
vMap := make([]*string, len(v.([]interface{})))
for i, vv := range v.([]interface{}) {
str := vv.(string)
vMap[i] = aws.String(str)
}
switch k {
case "allowed_headers":
r.AllowedHeaders = vMap
case "allowed_methods":
r.AllowedMethods = vMap
case "allowed_origins":
r.AllowedOrigins = vMap
case "expose_headers":
r.ExposeHeaders = vMap
}
}
}
rules = append(rules, r)
}
corsInput := &s3.PutBucketCorsInput{
Bucket: aws.String(bucket),
CORSConfiguration: &s3.CORSConfiguration{
CORSRules: rules,
},
}
log.Printf("[DEBUG] S3 bucket: %s, put CORS: %#v", bucket, corsInput)
_, err := s3conn.PutBucketCors(corsInput)
if err != nil {
return fmt.Errorf("Error putting S3 CORS: %s", err)
}
}
return nil
}
func resourceAwsS3BucketWebsiteUpdate(s3conn *s3.S3, d *schema.ResourceData) error {
	ws := d.Get("website").([]interface{})
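For reference, a trimmed-down sketch of the map-to-CORSRule conversion that resourceAwsS3BucketCorsUpdate performs, runnable against aws-sdk-go; the input map mimics what helper/schema hands over for one cors_rule block:

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	rule := map[string]interface{}{
		"allowed_methods": []interface{}{"PUT", "POST"},
		"allowed_origins": []interface{}{"https://www.example.com"},
		"max_age_seconds": 3000,
	}

	r := &s3.CORSRule{}
	for k, v := range rule {
		if k == "max_age_seconds" {
			// The only scalar field; everything else is a string list.
			r.MaxAgeSeconds = aws.Int64(int64(v.(int)))
			continue
		}
		strs := make([]*string, len(v.([]interface{})))
		for i, vv := range v.([]interface{}) {
			strs[i] = aws.String(vv.(string))
		}
		switch k {
		case "allowed_methods":
			r.AllowedMethods = strs
		case "allowed_origins":
			r.AllowedOrigins = strs
		}
	}
	fmt.Println(r)
}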

View File

@@ -188,6 +188,34 @@ func TestAccAWSS3Bucket_Versioning(t *testing.T) {
	})
}
func TestAccAWSS3Bucket_Cors(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckAWSS3BucketDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccAWSS3BucketConfigWithCORS,
Check: resource.ComposeTestCheckFunc(
testAccCheckAWSS3BucketExists("aws_s3_bucket.bucket"),
testAccCheckAWSS3BucketCors(
"aws_s3_bucket.bucket",
[]*s3.CORSRule{
&s3.CORSRule{
AllowedHeaders: []*string{aws.String("*")},
AllowedMethods: []*string{aws.String("PUT"), aws.String("POST")},
AllowedOrigins: []*string{aws.String("https://www.example.com")},
ExposeHeaders: []*string{aws.String("x-amz-server-side-encryption"), aws.String("ETag")},
MaxAgeSeconds: aws.Int64(3000),
},
},
),
),
},
},
})
}
func testAccCheckAWSS3BucketDestroy(s *terraform.State) error {
	conn := testAccProvider.Meta().(*AWSClient).s3conn

@@ -370,6 +398,26 @@ func testAccCheckAWSS3BucketVersioning(n string, versioningStatus string) resource.TestCheckFunc {
		return nil
	}
}
func testAccCheckAWSS3BucketCors(n string, corsRules []*s3.CORSRule) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, _ := s.RootModule().Resources[n]
conn := testAccProvider.Meta().(*AWSClient).s3conn
out, err := conn.GetBucketCors(&s3.GetBucketCorsInput{
Bucket: aws.String(rs.Primary.ID),
})
if err != nil {
return fmt.Errorf("GetBucketCors error: %v", err)
}
if !reflect.DeepEqual(out.CORSRules, corsRules) {
return fmt.Errorf("bad error cors rule, expected: %v, got %v", corsRules, out.CORSRules)
}
return nil
}
}
// These need a bit of randomness as the name can only be used once globally
// within AWS

@@ -452,3 +500,17 @@ resource "aws_s3_bucket" "bucket" {
	}
}
`, randInt)
var testAccAWSS3BucketConfigWithCORS = fmt.Sprintf(`
resource "aws_s3_bucket" "bucket" {
bucket = "tf-test-bucket-%d"
acl = "public-read"
cors_rule {
allowed_headers = ["*"]
allowed_methods = ["PUT","POST"]
allowed_origins = ["https://www.example.com"]
expose_headers = ["x-amz-server-side-encryption","ETag"]
max_age_seconds = 3000
}
}
`, randInt)
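The DeepEqual comparison in testAccCheckAWSS3BucketCors works because reflect.DeepEqual follows pointers and compares the pointed-to values, so two independently allocated slices of *string (or *s3.CORSRule) with equal contents compare equal. A tiny demonstration:

package main

import (
	"fmt"
	"reflect"
)

func main() {
	a, b := "ETag", "ETag"
	// Equal pointed-to values, different addresses: still deeply equal.
	fmt.Println(reflect.DeepEqual([]*string{&a}, []*string{&b})) // true
}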

View File

@@ -9,6 +9,7 @@ import (
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
	"github.com/aws/aws-sdk-go/service/directoryservice"
	"github.com/aws/aws-sdk-go/service/ec2"
	"github.com/aws/aws-sdk-go/service/ecs"

@@ -601,3 +602,57 @@ func flattenDSVpcSettings(
	return []map[string]interface{}{settings}
}
func expandCloudFormationParameters(params map[string]interface{}) []*cloudformation.Parameter {
var cfParams []*cloudformation.Parameter
for k, v := range params {
cfParams = append(cfParams, &cloudformation.Parameter{
ParameterKey: aws.String(k),
ParameterValue: aws.String(v.(string)),
})
}
return cfParams
}
// flattenCloudFormationParameters flattens a list of
// *cloudformation.Parameter, returning only the parameters that were
// originally configured, to avoid a clash with server-side default values
func flattenCloudFormationParameters(cfParams []*cloudformation.Parameter,
originalParams map[string]interface{}) map[string]interface{} {
params := make(map[string]interface{}, len(cfParams))
for _, p := range cfParams {
_, isConfigured := originalParams[*p.ParameterKey]
if isConfigured {
params[*p.ParameterKey] = *p.ParameterValue
}
}
return params
}
func expandCloudFormationTags(tags map[string]interface{}) []*cloudformation.Tag {
var cfTags []*cloudformation.Tag
for k, v := range tags {
cfTags = append(cfTags, &cloudformation.Tag{
Key: aws.String(k),
Value: aws.String(v.(string)),
})
}
return cfTags
}
func flattenCloudFormationTags(cfTags []*cloudformation.Tag) map[string]string {
tags := make(map[string]string, len(cfTags))
for _, t := range cfTags {
tags[*t.Key] = *t.Value
}
return tags
}
func flattenCloudFormationOutputs(cfOutputs []*cloudformation.Output) map[string]string {
outputs := make(map[string]string, len(cfOutputs))
for _, o := range cfOutputs {
outputs[*o.OutputKey] = *o.OutputValue
}
return outputs
}
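A sketch of the filtering that flattenCloudFormationParameters performs, with hypothetical parameter names: a server-side default ("Timeout" here) is dropped because the user never configured it, so it can never cause a spurious diff.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudformation"
)

// flatten mirrors flattenCloudFormationParameters above: only keys the
// user configured are kept in state.
func flatten(cf []*cloudformation.Parameter, configured map[string]interface{}) map[string]interface{} {
	out := make(map[string]interface{}, len(cf))
	for _, p := range cf {
		if _, ok := configured[*p.ParameterKey]; ok {
			out[*p.ParameterKey] = *p.ParameterValue
		}
	}
	return out
}

func main() {
	api := []*cloudformation.Parameter{
		{ParameterKey: aws.String("VpcId"), ParameterValue: aws.String("vpc-123")},
		{ParameterKey: aws.String("Timeout"), ParameterValue: aws.String("30")}, // default, never configured
	}
	configured := map[string]interface{}{"VpcId": "vpc-123"}
	fmt.Println(flatten(api, configured)) // map[VpcId:vpc-123]
}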

View File

@@ -3,9 +3,11 @@ package azure
import (
	"io"
	"io/ioutil"
	"math/rand"
	"os"
	"strings"
	"testing"
	"time"

	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/helper/schema"

@@ -193,6 +195,10 @@ func TestAzure_isFile(t *testing.T) {
	}
}
func genRandInt() int {
return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000
}
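The new genRandInt feeds the per-run resource names used by the reworked data disk tests in the next file; a sketch of the intended use:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// Copy of genRandInt above: seed from the wall clock so each test run
// gets a fresh suffix in the 0-99999 range, avoiding collisions with
// leftovers of earlier runs.
func genRandInt() int {
	return rand.New(rand.NewSource(time.Now().UnixNano())).Int() % 100000
}

func main() {
	name := fmt.Sprintf("terraform-test%d", genRandInt())
	fmt.Println(name) // e.g. terraform-test48213
}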
// testAzurePublishSettingsStr is a revoked publishsettings file
const testAzurePublishSettingsStr = `
<?xml version="1.0" encoding="utf-8"?>

View File

@@ -13,6 +13,7 @@ import (
func TestAccAzureDataDisk_basic(t *testing.T) {
	var disk virtualmachinedisk.DataDiskResponse
	name := fmt.Sprintf("terraform-test%d", genRandInt())

	resource.Test(t, resource.TestCase{
		PreCheck: func() { testAccPreCheck(t) },

@@ -20,13 +21,13 @@ func TestAccAzureDataDisk_basic(t *testing.T) {
		CheckDestroy: testAccCheckAzureDataDiskDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
-				Config: testAccAzureDataDisk_basic,
				Config: testAccAzureDataDisk_basic(name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAzureDataDiskExists(
						"azure_data_disk.foo", &disk),
					testAccCheckAzureDataDiskAttributes(&disk),
					resource.TestCheckResourceAttr(
-						"azure_data_disk.foo", "label", "terraform-test-0"),
						"azure_data_disk.foo", "label", fmt.Sprintf("%s-0", name)),
					resource.TestCheckResourceAttr(
						"azure_data_disk.foo", "size", "10"),
				),

@@ -37,6 +38,7 @@ func TestAccAzureDataDisk_update(t *testing.T) {
func TestAccAzureDataDisk_update(t *testing.T) {
	var disk virtualmachinedisk.DataDiskResponse
	name := fmt.Sprintf("terraform-test%d", genRandInt())

	resource.Test(t, resource.TestCase{
		PreCheck: func() { testAccPreCheck(t) },

@@ -44,12 +46,12 @@ func TestAccAzureDataDisk_update(t *testing.T) {
		CheckDestroy: testAccCheckAzureDataDiskDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
-				Config: testAccAzureDataDisk_advanced,
				Config: testAccAzureDataDisk_advanced(name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAzureDataDiskExists(
						"azure_data_disk.foo", &disk),
					resource.TestCheckResourceAttr(
-						"azure_data_disk.foo", "label", "terraform-test1-1"),
						"azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)),
					resource.TestCheckResourceAttr(
						"azure_data_disk.foo", "lun", "1"),
					resource.TestCheckResourceAttr(

@@ -57,17 +59,17 @@ func TestAccAzureDataDisk_update(t *testing.T) {
					resource.TestCheckResourceAttr(
						"azure_data_disk.foo", "caching", "ReadOnly"),
					resource.TestCheckResourceAttr(
-						"azure_data_disk.foo", "virtual_machine", "terraform-test1"),
						"azure_data_disk.foo", "virtual_machine", name),
				),
			},
			resource.TestStep{
-				Config: testAccAzureDataDisk_update,
				Config: testAccAzureDataDisk_update(name),
				Check: resource.ComposeTestCheckFunc(
					testAccCheckAzureDataDiskExists(
						"azure_data_disk.foo", &disk),
					resource.TestCheckResourceAttr(
-						"azure_data_disk.foo", "label", "terraform-test1-1"),
						"azure_data_disk.foo", "label", fmt.Sprintf("%s-1", name)),
					resource.TestCheckResourceAttr(
						"azure_data_disk.foo", "lun", "2"),
					resource.TestCheckResourceAttr(

@@ -168,55 +170,60 @@ func testAccCheckAzureDataDiskDestroy(s *terraform.State) error {
	return nil
}

-var testAccAzureDataDisk_basic = fmt.Sprintf(`
func testAccAzureDataDisk_basic(name string) string {
	return fmt.Sprintf(`
resource "azure_instance" "foo" {
-    name = "terraform-test"
    name = "%s"
    image = "Ubuntu Server 14.04 LTS"
    size = "Basic_A1"
    storage_service_name = "%s"
    location = "West US"
    username = "terraform"
    password = "Pass!admin123"
}

resource "azure_data_disk" "foo" {
    lun = 0
    size = 10
    storage_service_name = "${azure_instance.foo.storage_service_name}"
    virtual_machine = "${azure_instance.foo.id}"
-}`, testAccStorageServiceName)
}`, name, testAccStorageServiceName)
}

-var testAccAzureDataDisk_advanced = fmt.Sprintf(`
func testAccAzureDataDisk_advanced(name string) string {
	return fmt.Sprintf(`
resource "azure_instance" "foo" {
-    name = "terraform-test1"
    name = "%s"
    image = "Ubuntu Server 14.04 LTS"
    size = "Basic_A1"
    storage_service_name = "%s"
    location = "West US"
    username = "terraform"
    password = "Pass!admin123"
}

resource "azure_data_disk" "foo" {
    lun = 1
    size = 10
    caching = "ReadOnly"
    storage_service_name = "${azure_instance.foo.storage_service_name}"
    virtual_machine = "${azure_instance.foo.id}"
-}`, testAccStorageServiceName)
}`, name, testAccStorageServiceName)
}

-var testAccAzureDataDisk_update = fmt.Sprintf(`
func testAccAzureDataDisk_update(name string) string {
	return fmt.Sprintf(`
resource "azure_instance" "foo" {
-    name = "terraform-test1"
    name = "%s"
    image = "Ubuntu Server 14.04 LTS"
    size = "Basic_A1"
    storage_service_name = "%s"
    location = "West US"
    username = "terraform"
    password = "Pass!admin123"
}

resource "azure_instance" "bar" {
    name = "terraform-test2"
    image = "Ubuntu Server 14.04 LTS"
    size = "Basic_A1"

@@ -224,12 +231,13 @@ resource "azure_instance" "bar" {
    location = "West US"
    username = "terraform"
    password = "Pass!admin123"
}

resource "azure_data_disk" "foo" {
    lun = 2
    size = 20
    caching = "ReadWrite"
    storage_service_name = "${azure_instance.bar.storage_service_name}"
    virtual_machine = "${azure_instance.bar.id}"
-}`, testAccStorageServiceName)
}`, name, testAccStorageServiceName)
}

View File

@@ -446,7 +446,7 @@ resource "azure_security_group_rule" "foo" {
resource "azure_instance" "foo" {
    name = "terraform-test1"
-    image = "Windows Server 2012 R2 Datacenter, April 2015"
    image = "Windows Server 2012 R2 Datacenter, September 2015"
    size = "Basic_A1"
    storage_service_name = "%s"
    location = "West US"

@@ -520,7 +520,7 @@ resource "azure_security_group_rule" "bar" {
resource "azure_instance" "foo" {
    name = "terraform-test1"
-    image = "Windows Server 2012 R2 Datacenter, April 2015"
    image = "Windows Server 2012 R2 Datacenter, September 2015"
    size = "Basic_A2"
    storage_service_name = "%s"
    location = "West US"

View File

@@ -43,6 +43,7 @@ func resourceCloudStackVPC() *schema.Resource {
			"network_domain": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				ForceNew: true,
			},

View File

@@ -3,7 +3,8 @@ package digitalocean
import (
	"log"

-	"github.com/pearkes/digitalocean"
	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

type Config struct {

@@ -11,14 +12,14 @@ type Config struct {
}

// Client() returns a new client for accessing digital ocean.
-func (c *Config) Client() (*digitalocean.Client, error) {
func (c *Config) Client() (*godo.Client, error) {
-	client, err := digitalocean.NewClient(c.Token)
	tokenSrc := oauth2.StaticTokenSource(&oauth2.Token{
		AccessToken: c.Token,
	})

	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, tokenSrc))

-	log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.URL)
	log.Printf("[INFO] DigitalOcean Client configured for URL: %s", client.BaseURL.String())

-	if err != nil {
-		return nil, err
-	}

	return client, nil
}
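For reference, a sketch of building the same godo client outside the provider; the token string here is a placeholder:

package main

import (
	"fmt"

	"github.com/digitalocean/godo"
	"golang.org/x/oauth2"
)

func main() {
	// "do-token" stands in for a real DigitalOcean personal access token.
	ts := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: "do-token"})
	client := godo.NewClient(oauth2.NewClient(oauth2.NoContext, ts))
	fmt.Println(client.BaseURL) // https://api.digitalocean.com/
}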

View File

@@ -5,8 +5,8 @@ import (
	"log"
	"strings"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/pearkes/digitalocean"
)

func resourceDigitalOceanDomain() *schema.Resource {

@@ -32,30 +32,31 @@ func resourceDigitalOceanDomain() *schema.Resource {
}

func resourceDigitalOceanDomainCreate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	// Build up our creation options
-	opts := &digitalocean.CreateDomain{
	opts := &godo.DomainCreateRequest{
		Name:      d.Get("name").(string),
		IPAddress: d.Get("ip_address").(string),
	}

	log.Printf("[DEBUG] Domain create configuration: %#v", opts)
-	name, err := client.CreateDomain(opts)
	domain, _, err := client.Domains.Create(opts)
	if err != nil {
		return fmt.Errorf("Error creating Domain: %s", err)
	}

-	d.SetId(name)
	d.SetId(domain.Name)
-	log.Printf("[INFO] Domain Name: %s", name)
	log.Printf("[INFO] Domain Name: %s", domain.Name)

	return resourceDigitalOceanDomainRead(d, meta)
}

func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

-	domain, err := client.RetrieveDomain(d.Id())
	domain, _, err := client.Domains.Get(d.Id())
	if err != nil {
		// If the domain is somehow already destroyed, mark as
		// successfully gone

@@ -73,10 +74,10 @@ func resourceDigitalOceanDomainRead(d *schema.ResourceData, meta interface{}) error {
}

func resourceDigitalOceanDomainDelete(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	log.Printf("[INFO] Deleting Domain: %s", d.Id())
-	err := client.DestroyDomain(d.Id())
	_, err := client.Domains.Delete(d.Id())
	if err != nil {
		return fmt.Errorf("Error deleting Domain: %s", err)
	}

View File

@@ -4,13 +4,13 @@ import (
	"fmt"
	"testing"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
-	"github.com/pearkes/digitalocean"
)

func TestAccDigitalOceanDomain_Basic(t *testing.T) {
-	var domain digitalocean.Domain
	var domain godo.Domain

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },

@@ -33,7 +33,7 @@ func TestAccDigitalOceanDomain_Basic(t *testing.T) {
}

func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error {
-	client := testAccProvider.Meta().(*digitalocean.Client)
	client := testAccProvider.Meta().(*godo.Client)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "digitalocean_domain" {

@@ -41,17 +41,17 @@ func testAccCheckDigitalOceanDomainDestroy(s *terraform.State) error {
		}

		// Try to find the domain
-		_, err := client.RetrieveDomain(rs.Primary.ID)
		_, _, err := client.Domains.Get(rs.Primary.ID)

		if err == nil {
-			fmt.Errorf("Domain still exists")
			return fmt.Errorf("Domain still exists")
		}
	}

	return nil
}

-func testAccCheckDigitalOceanDomainAttributes(domain *digitalocean.Domain) resource.TestCheckFunc {
func testAccCheckDigitalOceanDomainAttributes(domain *godo.Domain) resource.TestCheckFunc {
	return func(s *terraform.State) error {

		if domain.Name != "foobar-test-terraform.com" {

@@ -62,7 +62,7 @@ func testAccCheckDigitalOceanDomainAttributes(domain *godo.Domain) resource.TestCheckFunc {
	}
}

-func testAccCheckDigitalOceanDomainExists(n string, domain *digitalocean.Domain) resource.TestCheckFunc {
func testAccCheckDigitalOceanDomainExists(n string, domain *godo.Domain) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]

@@ -74,9 +74,9 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *godo.Domain) resource.TestCheckFunc {
			return fmt.Errorf("No Record ID is set")
		}

-		client := testAccProvider.Meta().(*digitalocean.Client)
		client := testAccProvider.Meta().(*godo.Client)

-		foundDomain, err := client.RetrieveDomain(rs.Primary.ID)
		foundDomain, _, err := client.Domains.Get(rs.Primary.ID)

		if err != nil {
			return err

@@ -86,7 +86,7 @@ func testAccCheckDigitalOceanDomainExists(n string, domain *godo.Domain) resource.TestCheckFunc {
			return fmt.Errorf("Record not found")
		}

-		*domain = foundDomain
		*domain = *foundDomain

		return nil
	}

View File

@@ -3,12 +3,13 @@ package digitalocean
import (
	"fmt"
	"log"
	"strconv"
	"strings"
	"time"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
-	"github.com/pearkes/digitalocean"
)

func resourceDigitalOceanDroplet() *schema.Resource {

@@ -105,11 +106,13 @@ func resourceDigitalOceanDroplet() *schema.Resource {
}

func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	// Build up our creation options
-	opts := &digitalocean.CreateDroplet{
-		Image:  d.Get("image").(string),
	opts := &godo.DropletCreateRequest{
		Image: godo.DropletCreateImage{
			Slug: d.Get("image").(string),
		},
		Name:   d.Get("name").(string),
		Region: d.Get("region").(string),
		Size:   d.Get("size").(string),

@@ -120,7 +123,7 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error {
	}

	if attr, ok := d.GetOk("ipv6"); ok {
-		opts.IPV6 = attr.(bool)
		opts.IPv6 = attr.(bool)
	}

	if attr, ok := d.GetOk("private_networking"); ok {

@@ -132,25 +135,32 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error {
	}

	// Get configured ssh_keys
-	ssh_keys := d.Get("ssh_keys.#").(int)
	sshKeys := d.Get("ssh_keys.#").(int)
-	if ssh_keys > 0 {
	if sshKeys > 0 {
-		opts.SSHKeys = make([]string, 0, ssh_keys)
		opts.SSHKeys = make([]godo.DropletCreateSSHKey, 0, sshKeys)
-		for i := 0; i < ssh_keys; i++ {
		for i := 0; i < sshKeys; i++ {
			key := fmt.Sprintf("ssh_keys.%d", i)
-			opts.SSHKeys = append(opts.SSHKeys, d.Get(key).(string))
			id, err := strconv.Atoi(d.Get(key).(string))
			if err != nil {
				return err
			}
			opts.SSHKeys = append(opts.SSHKeys, godo.DropletCreateSSHKey{
				ID: id,
			})
		}
	}

	log.Printf("[DEBUG] Droplet create configuration: %#v", opts)

-	id, err := client.CreateDroplet(opts)
	droplet, _, err := client.Droplets.Create(opts)

	if err != nil {
		return fmt.Errorf("Error creating droplet: %s", err)
	}

	// Assign the droplets id
-	d.SetId(id)
	d.SetId(strconv.Itoa(droplet.ID))

	log.Printf("[INFO] Droplet ID: %s", d.Id())

@@ -164,10 +174,15 @@ func resourceDigitalOceanDropletCreate(d *schema.ResourceData, meta interface{}) error {
}

func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	id, err := strconv.Atoi(d.Id())
	if err != nil {
		return fmt.Errorf("invalid droplet id: %v", err)
	}

	// Retrieve the droplet properties for updating the state
-	droplet, err := client.RetrieveDroplet(d.Id())
	droplet, _, err := client.Droplets.Get(id)
	if err != nil {
		// check if the droplet no longer exists.
		if err.Error() == "Error retrieving droplet: API Error: 404 Not Found" {

@@ -178,48 +193,70 @@ func resourceDigitalOceanDropletRead(d *schema.ResourceData, meta interface{}) error {
		return fmt.Errorf("Error retrieving droplet: %s", err)
	}

-	if droplet.ImageSlug() != "" {
	if droplet.Image.Slug != "" {
-		d.Set("image", droplet.ImageSlug())
		d.Set("image", droplet.Image.Slug)
	} else {
-		d.Set("image", droplet.ImageId())
		d.Set("image", droplet.Image.ID)
	}

	d.Set("name", droplet.Name)
-	d.Set("region", droplet.RegionSlug())
	d.Set("region", droplet.Region.Slug)
-	d.Set("size", droplet.SizeSlug)
	d.Set("size", droplet.Size.Slug)
	d.Set("status", droplet.Status)
-	d.Set("locked", droplet.IsLocked())
	d.Set("locked", strconv.FormatBool(droplet.Locked))

-	if droplet.IPV6Address("public") != "" {
	if publicIPv6 := findIPv6AddrByType(droplet, "public"); publicIPv6 != "" {
		d.Set("ipv6", true)
-		d.Set("ipv6_address", droplet.IPV6Address("public"))
		d.Set("ipv6_address", publicIPv6)
-		d.Set("ipv6_address_private", droplet.IPV6Address("private"))
		d.Set("ipv6_address_private", findIPv6AddrByType(droplet, "private"))
	}

-	d.Set("ipv4_address", droplet.IPV4Address("public"))
	d.Set("ipv4_address", findIPv4AddrByType(droplet, "public"))

-	if droplet.NetworkingType() == "private" {
	if privateIPv4 := findIPv4AddrByType(droplet, "private"); privateIPv4 != "" {
		d.Set("private_networking", true)
-		d.Set("ipv4_address_private", droplet.IPV4Address("private"))
		d.Set("ipv4_address_private", privateIPv4)
	}

	// Initialize the connection info
	d.SetConnInfo(map[string]string{
		"type": "ssh",
-		"host": droplet.IPV4Address("public"),
		"host": findIPv4AddrByType(droplet, "public"),
	})

	return nil
}

func findIPv6AddrByType(d *godo.Droplet, addrType string) string {
	for _, addr := range d.Networks.V6 {
		if addr.Type == addrType {
			return addr.IPAddress
		}
	}
	return ""
}

func findIPv4AddrByType(d *godo.Droplet, addrType string) string {
	for _, addr := range d.Networks.V4 {
		if addr.Type == addrType {
			return addr.IPAddress
		}
	}
	return ""
}

func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	id, err := strconv.Atoi(d.Id())
	if err != nil {
		return fmt.Errorf("invalid droplet id: %v", err)
	}

	if d.HasChange("size") {
		oldSize, newSize := d.GetChange("size")

-		err := client.PowerOff(d.Id())
		_, _, err = client.DropletActions.PowerOff(id)
		if err != nil && !strings.Contains(err.Error(), "Droplet is already powered off") {
			return fmt.Errorf(
				"Error powering off droplet (%s): %s", d.Id(), err)

@@ -233,7 +270,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
		}

		// Resize the droplet
-		err = client.Resize(d.Id(), newSize.(string))
		_, _, err = client.DropletActions.Resize(id, newSize.(string), true)
		if err != nil {
			newErr := powerOnAndWait(d, meta)
			if newErr != nil {

@@ -258,7 +295,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
				"Error waiting for resize droplet (%s) to finish: %s", d.Id(), err)
		}

-		err = client.PowerOn(d.Id())
		_, _, err = client.DropletActions.PowerOn(id)

		if err != nil {
			return fmt.Errorf(

@@ -276,7 +313,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
		oldName, newName := d.GetChange("name")

		// Rename the droplet
-		err := client.Rename(d.Id(), newName.(string))
		_, _, err = client.DropletActions.Rename(id, newName.(string))

		if err != nil {
			return fmt.Errorf(

@@ -296,7 +333,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
	// As there is no way to disable private networking,
	// we only check if it needs to be enabled
	if d.HasChange("private_networking") && d.Get("private_networking").(bool) {
-		err := client.EnablePrivateNetworking(d.Id())
		_, _, err = client.DropletActions.EnablePrivateNetworking(id)

		if err != nil {
			return fmt.Errorf(

@@ -313,7 +350,7 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
	// As there is no way to disable IPv6, we only check if it needs to be enabled
	if d.HasChange("ipv6") && d.Get("ipv6").(bool) {
-		err := client.EnableIPV6s(d.Id())
		_, _, err = client.DropletActions.EnableIPv6(id)

		if err != nil {
			return fmt.Errorf(

@@ -334,9 +371,14 @@ func resourceDigitalOceanDropletUpdate(d *schema.ResourceData, meta interface{}) error {
}

func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)

	id, err := strconv.Atoi(d.Id())
	if err != nil {
		return fmt.Errorf("invalid droplet id: %v", err)
	}

-	_, err := WaitForDropletAttribute(
	_, err = WaitForDropletAttribute(
		d, "false", []string{"", "true"}, "locked", meta)

	if err != nil {

@@ -347,7 +389,7 @@ func resourceDigitalOceanDropletDelete(d *schema.ResourceData, meta interface{}) error {
	log.Printf("[INFO] Deleting droplet: %s", d.Id())

	// Destroy the droplet
-	err = client.DestroyDroplet(d.Id())
	_, err = client.Droplets.Delete(id)

	// Handle remotely destroyed droplets
	if err != nil && strings.Contains(err.Error(), "404 Not Found") {

@@ -390,9 +432,14 @@
	// cleaner and more efficient
func newDropletStateRefreshFunc(
	d *schema.ResourceData, attribute string, meta interface{}) resource.StateRefreshFunc {
-	client := meta.(*digitalocean.Client)
	client := meta.(*godo.Client)
	return func() (interface{}, string, error) {
-		err := resourceDigitalOceanDropletRead(d, meta)
		id, err := strconv.Atoi(d.Id())
		if err != nil {
			return nil, "", err
		}

		err = resourceDigitalOceanDropletRead(d, meta)
		if err != nil {
			return nil, "", err
		}

@@ -408,7 +455,7 @@ func newDropletStateRefreshFunc(
		// See if we can access our attribute
		if attr, ok := d.GetOk(attribute); ok {
			// Retrieve the droplet properties
-			droplet, err := client.RetrieveDroplet(d.Id())
			droplet, _, err := client.Droplets.Get(id)
			if err != nil {
				return nil, "", fmt.Errorf("Error retrieving droplet: %s", err)
			}

@@ -422,8 +469,13 @@ func newDropletStateRefreshFunc(
// Powers on the droplet and waits for it to be active
func powerOnAndWait(d *schema.ResourceData, meta interface{}) error {
-	client := meta.(*digitalocean.Client)
-	err := client.PowerOn(d.Id())
	id, err := strconv.Atoi(d.Id())
	if err != nil {
		return fmt.Errorf("invalid droplet id: %v", err)
	}

	client := meta.(*godo.Client)
	_, _, err = client.DropletActions.PowerOn(id)
	if err != nil {
		return err
	}
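A standalone sketch of the new address helpers against a hand-built droplet (field names per the godo package): an unknown network type falls through to the empty string, which is exactly what the read function above relies on.

package main

import (
	"fmt"

	"github.com/digitalocean/godo"
)

// Local copy of the helper defined in the resource above.
func findIPv4AddrByType(d *godo.Droplet, addrType string) string {
	for _, addr := range d.Networks.V4 {
		if addr.Type == addrType {
			return addr.IPAddress
		}
	}
	return ""
}

func main() {
	d := &godo.Droplet{
		Networks: &godo.Networks{
			V4: []godo.NetworkV4{
				{IPAddress: "10.0.0.5", Type: "private"},
				{IPAddress: "203.0.113.10", Type: "public"},
			},
		},
	}
	fmt.Println(findIPv4AddrByType(d, "public"))  // 203.0.113.10
	fmt.Println(findIPv4AddrByType(d, "missing")) // empty string
}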

View File

@@ -2,16 +2,17 @@ package digitalocean
import (
	"fmt"
	"strconv"
	"strings"
	"testing"

	"github.com/digitalocean/godo"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
-	"github.com/pearkes/digitalocean"
)

func TestAccDigitalOceanDroplet_Basic(t *testing.T) {
-	var droplet digitalocean.Droplet
	var droplet godo.Droplet

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },

@@ -40,7 +41,7 @@ func TestAccDigitalOceanDroplet_Basic(t *testing.T) {
}

func TestAccDigitalOceanDroplet_Update(t *testing.T) {
-	var droplet digitalocean.Droplet
	var droplet godo.Droplet

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },

@@ -71,7 +72,7 @@ func TestAccDigitalOceanDroplet_Update(t *testing.T) {
}

func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) {
-	var droplet digitalocean.Droplet
	var droplet godo.Droplet

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },

@@ -94,15 +95,20 @@ func TestAccDigitalOceanDroplet_PrivateNetworkingIpv6(t *testing.T) {
}

func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error {
-	client := testAccProvider.Meta().(*digitalocean.Client)
	client := testAccProvider.Meta().(*godo.Client)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "digitalocean_droplet" {
			continue
		}

		id, err := strconv.Atoi(rs.Primary.ID)
		if err != nil {
			return err
		}

		// Try to find the Droplet
-		_, err := client.RetrieveDroplet(rs.Primary.ID)
		_, _, err = client.Droplets.Get(id)

		// Wait

@@ -116,19 +122,19 @@ func testAccCheckDigitalOceanDropletDestroy(s *terraform.State) error {
	return nil
}

-func testAccCheckDigitalOceanDropletAttributes(droplet *digitalocean.Droplet) resource.TestCheckFunc {
func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.TestCheckFunc {
	return func(s *terraform.State) error {

-		if droplet.ImageSlug() != "centos-5-8-x32" {
		if droplet.Image.Slug != "centos-5-8-x32" {
-			return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug())
			return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug)
		}

-		if droplet.SizeSlug != "512mb" {
		if droplet.Size.Slug != "512mb" {
-			return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug)
			return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug)
		}

-		if droplet.RegionSlug() != "nyc3" {
		if droplet.Region.Slug != "nyc3" {
-			return fmt.Errorf("Bad region_slug: %s", droplet.RegionSlug())
			return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug)
		}

		if droplet.Name != "foo" {

@@ -138,10 +144,10 @@ func testAccCheckDigitalOceanDropletAttributes(droplet *godo.Droplet) resource.TestCheckFunc {
	}
}

-func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *digitalocean.Droplet) resource.TestCheckFunc {
func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *godo.Droplet) resource.TestCheckFunc {
	return func(s *terraform.State) error {

-		if droplet.SizeSlug != "1gb" {
		if droplet.Size.Slug != "1gb" {
			return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug)
		}

@@ -153,50 +159,46 @@ func testAccCheckDigitalOceanDropletRenamedAndResized(droplet *godo.Droplet) resource.TestCheckFunc {
	}
}

-func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *digitalocean.Droplet) resource.TestCheckFunc {
func testAccCheckDigitalOceanDropletAttributes_PrivateNetworkingIpv6(droplet *godo.Droplet) resource.TestCheckFunc {
	return func(s *terraform.State) error {

-		if droplet.ImageSlug() != "centos-5-8-x32" {
		if droplet.Image.Slug != "centos-5-8-x32" {
-			return fmt.Errorf("Bad image_slug: %s", droplet.ImageSlug())
			return fmt.Errorf("Bad image_slug: %s", droplet.Image.Slug)
		}

-		if droplet.SizeSlug != "1gb" {
		if droplet.Size.Slug != "1gb" {
-			return fmt.Errorf("Bad size_slug: %s", droplet.SizeSlug)
			return fmt.Errorf("Bad size_slug: %s", droplet.Size.Slug)
		}

-		if droplet.RegionSlug() != "sgp1" {
		if droplet.Region.Slug != "sgp1" {
-			return fmt.Errorf("Bad region_slug: %s", droplet.RegionSlug())
			return fmt.Errorf("Bad region_slug: %s", droplet.Region.Slug)
		}

		if droplet.Name != "baz" {
			return fmt.Errorf("Bad name: %s", droplet.Name)
		}

-		if droplet.IPV4Address("private") == "" {
		if findIPv4AddrByType(droplet, "private") == "" {
-			return fmt.Errorf("No ipv4 private: %s", droplet.IPV4Address("private"))
			return fmt.Errorf("No ipv4 private: %s", findIPv4AddrByType(droplet, "private"))
		}

		// if droplet.IPV6Address("private") == "" {
		// 	return fmt.Errorf("No ipv6 private: %s", droplet.IPV6Address("private"))
		// }

-		if droplet.NetworkingType() != "private" {
-			return fmt.Errorf("Bad networking type: %s", droplet.NetworkingType())
-		}
-
-		if droplet.IPV4Address("public") == "" {
-			return fmt.Errorf("No ipv4 public: %s", droplet.IPV4Address("public"))
-		}
-
-		if droplet.IPV6Address("public") == "" {
-			return fmt.Errorf("No ipv6 public: %s", droplet.IPV6Address("public"))
-		}
		if findIPv4AddrByType(droplet, "public") == "" {
			return fmt.Errorf("No ipv4 public: %s", findIPv4AddrByType(droplet, "public"))
		}

		if findIPv6AddrByType(droplet, "public") == "" {
			return fmt.Errorf("No ipv6 public: %s", findIPv6AddrByType(droplet, "public"))
		}

		return nil
	}
}

-func testAccCheckDigitalOceanDropletExists(n string, droplet *digitalocean.Droplet) resource.TestCheckFunc {
func testAccCheckDigitalOceanDropletExists(n string, droplet *godo.Droplet) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {

@@ -207,19 +209,25 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *godo.Droplet) resource.TestCheckFunc {
			return fmt.Errorf("No Droplet ID is set")
		}

-		client := testAccProvider.Meta().(*digitalocean.Client)
		client := testAccProvider.Meta().(*godo.Client)

-		retrieveDroplet, err := client.RetrieveDroplet(rs.Primary.ID)
		id, err := strconv.Atoi(rs.Primary.ID)
		if err != nil {
			return err
		}

		// Try to find the Droplet
		retrieveDroplet, _, err := client.Droplets.Get(id)

		if err != nil {
			return err
		}

-		if retrieveDroplet.StringId() != rs.Primary.ID {
		if strconv.Itoa(retrieveDroplet.ID) != rs.Primary.ID {
			return fmt.Errorf("Droplet not found")
		}

-		*droplet = retrieveDroplet
		*droplet = *retrieveDroplet

		return nil
	}

@@ -230,7 +238,7 @@ func testAccCheckDigitalOceanDropletExists(n string, droplet *godo.Droplet) resource.TestCheckFunc {
// other test already
//
//func Test_new_droplet_state_refresh_func(t *testing.T) {
-//	droplet := digitalocean.Droplet{
//	droplet := godo.Droplet{
//		Name: "foobar",
//	}
//	resourceMap, _ := resource_digitalocean_droplet_update_state(


@ -3,10 +3,11 @@ package digitalocean
import ( import (
"fmt" "fmt"
"log" "log"
"strconv"
"strings" "strings"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"github.com/pearkes/digitalocean"
) )
func resourceDigitalOceanRecord() *schema.Resource { func resourceDigitalOceanRecord() *schema.Resource {
@ -66,34 +67,55 @@ func resourceDigitalOceanRecord() *schema.Resource {
} }
func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanRecordCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
newRecord := digitalocean.CreateRecord{ newRecord := godo.DomainRecordEditRequest{
Type: d.Get("type").(string), Type: d.Get("type").(string),
Name: d.Get("name").(string), Name: d.Get("name").(string),
Data: d.Get("value").(string), Data: d.Get("value").(string),
Priority: d.Get("priority").(string), }
Port: d.Get("port").(string),
Weight: d.Get("weight").(string), var err error
if priority := d.Get("priority").(string); priority != "" {
newRecord.Priority, err = strconv.Atoi(priority)
if err != nil {
return fmt.Errorf("Failed to parse priority as an integer: %v", err)
}
}
if port := d.Get("port").(string); port != "" {
newRecord.Port, err = strconv.Atoi(port)
if err != nil {
return fmt.Errorf("Failed to parse port as an integer: %v", err)
}
}
if weight := d.Get("weight").(string); weight != "" {
newRecord.Weight, err = strconv.Atoi(weight)
if err != nil {
return fmt.Errorf("Failed to parse weight as an integer: %v", err)
}
} }
log.Printf("[DEBUG] record create configuration: %#v", newRecord) log.Printf("[DEBUG] record create configuration: %#v", newRecord)
recId, err := client.CreateRecord(d.Get("domain").(string), &newRecord) rec, _, err := client.Domains.CreateRecord(d.Get("domain").(string), &newRecord)
if err != nil { if err != nil {
return fmt.Errorf("Failed to create record: %s", err) return fmt.Errorf("Failed to create record: %s", err)
} }
d.SetId(recId) d.SetId(strconv.Itoa(rec.ID))
log.Printf("[INFO] Record ID: %s", d.Id()) log.Printf("[INFO] Record ID: %s", d.Id())
return resourceDigitalOceanRecordRead(d, meta) return resourceDigitalOceanRecordRead(d, meta)
} }
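The three strconv.Atoi blocks above repeat the same optional-string-to-int dance for priority, port, and weight. A hypothetical helper (not part of this commit) that would consolidate them:

func parseOptionalInt(d *schema.ResourceData, key string, dst *int) error {
	// Empty strings mean the attribute is unset and are skipped;
	// anything else must parse as a base-10 integer.
	s := d.Get(key).(string)
	if s == "" {
		return nil
	}
	n, err := strconv.Atoi(s)
	if err != nil {
		return fmt.Errorf("Failed to parse %s as an integer: %v", key, err)
	}
	*dst = n
	return nil
}

With it, the create path would reduce to three calls such as parseOptionalInt(d, "priority", &newRecord.Priority).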
func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
domain := d.Get("domain").(string) domain := d.Get("domain").(string)
id, err := strconv.Atoi(d.Id())
if err != nil {
return fmt.Errorf("invalid record ID: %v", err)
}
rec, err := client.RetrieveRecord(domain, d.Id()) rec, _, err := client.Domains.Record(domain, id)
if err != nil { if err != nil {
// If the record is somehow already destroyed, mark as // If the record is somehow already destroyed, mark as
// successfully gone // successfully gone
@ -120,23 +142,29 @@ func resourceDigitalOceanRecordRead(d *schema.ResourceData, meta interface{}) er
d.Set("name", rec.Name) d.Set("name", rec.Name)
d.Set("type", rec.Type) d.Set("type", rec.Type)
d.Set("value", rec.Data) d.Set("value", rec.Data)
d.Set("weight", rec.StringWeight()) d.Set("weight", strconv.Itoa(rec.Weight))
d.Set("priority", rec.StringPriority()) d.Set("priority", strconv.Itoa(rec.Priority))
d.Set("port", rec.StringPort()) d.Set("port", strconv.Itoa(rec.Port))
return nil return nil
} }
func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
var updateRecord digitalocean.UpdateRecord domain := d.Get("domain").(string)
if v, ok := d.GetOk("name"); ok { id, err := strconv.Atoi(d.Id())
updateRecord.Name = v.(string) if err != nil {
return fmt.Errorf("invalid record ID: %v", err)
} }
log.Printf("[DEBUG] record update configuration: %#v", updateRecord) var editRecord godo.DomainRecordEditRequest
err := client.UpdateRecord(d.Get("domain").(string), d.Id(), &updateRecord) if v, ok := d.GetOk("name"); ok {
editRecord.Name = v.(string)
}
log.Printf("[DEBUG] record update configuration: %#v", editRecord)
_, _, err = client.Domains.EditRecord(domain, id, &editRecord)
if err != nil { if err != nil {
return fmt.Errorf("Failed to update record: %s", err) return fmt.Errorf("Failed to update record: %s", err)
} }
@ -145,11 +173,17 @@ func resourceDigitalOceanRecordUpdate(d *schema.ResourceData, meta interface{})
} }
func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanRecordDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
log.Printf( domain := d.Get("domain").(string)
"[INFO] Deleting record: %s, %s", d.Get("domain").(string), d.Id()) id, err := strconv.Atoi(d.Id())
err := client.DestroyRecord(d.Get("domain").(string), d.Id()) if err != nil {
return fmt.Errorf("invalid record ID: %v", err)
}
log.Printf("[INFO] Deleting record: %s, %d", domain, id)
_, err = client.Domains.DeleteRecord(domain, id)
if err != nil { if err != nil {
// If the record is somehow already destroyed, mark as // If the record is somehow already destroyed, mark as
// successfully gone // successfully gone


@ -2,15 +2,16 @@ package digitalocean
import ( import (
"fmt" "fmt"
"strconv"
"testing" "testing"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
) )
func TestAccDigitalOceanRecord_Basic(t *testing.T) { func TestAccDigitalOceanRecord_Basic(t *testing.T) {
var record digitalocean.Record var record godo.DomainRecord
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -35,7 +36,7 @@ func TestAccDigitalOceanRecord_Basic(t *testing.T) {
} }
func TestAccDigitalOceanRecord_Updated(t *testing.T) { func TestAccDigitalOceanRecord_Updated(t *testing.T) {
var record digitalocean.Record var record godo.DomainRecord
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -77,7 +78,7 @@ func TestAccDigitalOceanRecord_Updated(t *testing.T) {
} }
func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) { func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
var record digitalocean.Record var record godo.DomainRecord
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -104,7 +105,7 @@ func TestAccDigitalOceanRecord_HostnameValue(t *testing.T) {
} }
func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) { func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) {
var record digitalocean.Record var record godo.DomainRecord
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -131,7 +132,7 @@ func TestAccDigitalOceanRecord_RelativeHostnameValue(t *testing.T) {
} }
func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) { func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) {
var record digitalocean.Record var record godo.DomainRecord
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -158,14 +159,19 @@ func TestAccDigitalOceanRecord_ExternalHostnameValue(t *testing.T) {
} }
func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error { func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*digitalocean.Client) client := testAccProvider.Meta().(*godo.Client)
for _, rs := range s.RootModule().Resources { for _, rs := range s.RootModule().Resources {
if rs.Type != "digitalocean_record" { if rs.Type != "digitalocean_record" {
continue continue
} }
domain := rs.Primary.Attributes["domain"]
id, err := strconv.Atoi(rs.Primary.ID)
if err != nil {
return err
}
_, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) _, _, err = client.Domains.Record(domain, id)
if err == nil { if err == nil {
return fmt.Errorf("Record still exists") return fmt.Errorf("Record still exists")
@ -175,7 +181,7 @@ func testAccCheckDigitalOceanRecordDestroy(s *terraform.State) error {
return nil return nil
} }
func testAccCheckDigitalOceanRecordAttributes(record *digitalocean.Record) resource.TestCheckFunc { func testAccCheckDigitalOceanRecordAttributes(record *godo.DomainRecord) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if record.Data != "192.168.0.10" { if record.Data != "192.168.0.10" {
@ -186,7 +192,7 @@ func testAccCheckDigitalOceanRecordAttributes(record *digitalocean.Record) resou
} }
} }
func testAccCheckDigitalOceanRecordAttributesUpdated(record *digitalocean.Record) resource.TestCheckFunc { func testAccCheckDigitalOceanRecordAttributesUpdated(record *godo.DomainRecord) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if record.Data != "192.168.0.11" { if record.Data != "192.168.0.11" {
@ -197,7 +203,7 @@ func testAccCheckDigitalOceanRecordAttributesUpdated(record *digitalocean.Record
} }
} }
func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record) resource.TestCheckFunc { func testAccCheckDigitalOceanRecordExists(n string, record *godo.DomainRecord) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
@ -209,25 +215,31 @@ func testAccCheckDigitalOceanRecordExists(n string, record *digitalocean.Record)
return fmt.Errorf("No Record ID is set") return fmt.Errorf("No Record ID is set")
} }
client := testAccProvider.Meta().(*digitalocean.Client) client := testAccProvider.Meta().(*godo.Client)
foundRecord, err := client.RetrieveRecord(rs.Primary.Attributes["domain"], rs.Primary.ID) domain := rs.Primary.Attributes["domain"]
id, err := strconv.Atoi(rs.Primary.ID)
if err != nil {
return err
}
foundRecord, _, err := client.Domains.Record(domain, id)
if err != nil { if err != nil {
return err return err
} }
if foundRecord.StringId() != rs.Primary.ID { if strconv.Itoa(foundRecord.ID) != rs.Primary.ID {
return fmt.Errorf("Record not found") return fmt.Errorf("Record not found")
} }
*record = foundRecord *record = *foundRecord
return nil return nil
} }
} }
func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *digitalocean.Record) resource.TestCheckFunc { func testAccCheckDigitalOceanRecordAttributesHostname(data string, record *godo.DomainRecord) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if record.Data != data { if record.Data != data {


@ -3,10 +3,11 @@ package digitalocean
import ( import (
"fmt" "fmt"
"log" "log"
"strconv"
"strings" "strings"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/schema" "github.com/hashicorp/terraform/helper/schema"
"github.com/pearkes/digitalocean"
) )
func resourceDigitalOceanSSHKey() *schema.Resource { func resourceDigitalOceanSSHKey() *schema.Resource {
@ -42,30 +43,35 @@ func resourceDigitalOceanSSHKey() *schema.Resource {
} }
func resourceDigitalOceanSSHKeyCreate(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanSSHKeyCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
// Build up our creation options // Build up our creation options
opts := &digitalocean.CreateSSHKey{ opts := &godo.KeyCreateRequest{
Name: d.Get("name").(string), Name: d.Get("name").(string),
PublicKey: d.Get("public_key").(string), PublicKey: d.Get("public_key").(string),
} }
log.Printf("[DEBUG] SSH Key create configuration: %#v", opts) log.Printf("[DEBUG] SSH Key create configuration: %#v", opts)
id, err := client.CreateSSHKey(opts) key, _, err := client.Keys.Create(opts)
if err != nil { if err != nil {
return fmt.Errorf("Error creating SSH Key: %s", err) return fmt.Errorf("Error creating SSH Key: %s", err)
} }
d.SetId(id) d.SetId(strconv.Itoa(key.ID))
log.Printf("[INFO] SSH Key: %s", id) log.Printf("[INFO] SSH Key: %d", key.ID)
return resourceDigitalOceanSSHKeyRead(d, meta) return resourceDigitalOceanSSHKeyRead(d, meta)
} }
func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
key, err := client.RetrieveSSHKey(d.Id()) id, err := strconv.Atoi(d.Id())
if err != nil {
return fmt.Errorf("invalid SSH key id: %v", err)
}
key, _, err := client.Keys.GetByID(id)
if err != nil { if err != nil {
// If the key is somehow already destroyed, mark as // If the key is somehow already destroyed, mark as
// successfully gone // successfully gone
@ -84,7 +90,12 @@ func resourceDigitalOceanSSHKeyRead(d *schema.ResourceData, meta interface{}) er
} }
func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
id, err := strconv.Atoi(d.Id())
if err != nil {
return fmt.Errorf("invalid SSH key id: %v", err)
}
var newName string var newName string
if v, ok := d.GetOk("name"); ok { if v, ok := d.GetOk("name"); ok {
@ -92,7 +103,10 @@ func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{})
} }
log.Printf("[DEBUG] SSH key update name: %#v", newName) log.Printf("[DEBUG] SSH key update name: %#v", newName)
err := client.RenameSSHKey(d.Id(), newName) opts := &godo.KeyUpdateRequest{
Name: newName,
}
_, _, err = client.Keys.UpdateByID(id, opts)
if err != nil { if err != nil {
return fmt.Errorf("Failed to update SSH key: %s", err) return fmt.Errorf("Failed to update SSH key: %s", err)
} }
@ -101,10 +115,15 @@ func resourceDigitalOceanSSHKeyUpdate(d *schema.ResourceData, meta interface{})
} }
func resourceDigitalOceanSSHKeyDelete(d *schema.ResourceData, meta interface{}) error { func resourceDigitalOceanSSHKeyDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*digitalocean.Client) client := meta.(*godo.Client)
log.Printf("[INFO] Deleting SSH key: %s", d.Id()) id, err := strconv.Atoi(d.Id())
err := client.DestroySSHKey(d.Id()) if err != nil {
return fmt.Errorf("invalid SSH key id: %v", err)
}
log.Printf("[INFO] Deleting SSH key: %d", id)
_, err = client.Keys.DeleteByID(id)
if err != nil { if err != nil {
return fmt.Errorf("Error deleting SSH key: %s", err) return fmt.Errorf("Error deleting SSH key: %s", err)
} }
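As with droplets and records, the SSH key resource now round-trips between Terraform's string IDs and godo's integer IDs in every CRUD function. A sketch of that shared pattern, extracted into a hypothetical helper:

func sshKeyID(d *schema.ResourceData) (int, error) {
	// Terraform state persists IDs as strings; godo addresses keys
	// by integer ID, so parse on every read, update, and delete.
	id, err := strconv.Atoi(d.Id())
	if err != nil {
		return 0, fmt.Errorf("invalid SSH key id: %v", err)
	}
	return id, nil
}

The create path performs the inverse with d.SetId(strconv.Itoa(key.ID)).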


@ -6,13 +6,13 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/digitalocean/godo"
"github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"github.com/pearkes/digitalocean"
) )
func TestAccDigitalOceanSSHKey_Basic(t *testing.T) { func TestAccDigitalOceanSSHKey_Basic(t *testing.T) {
var key digitalocean.SSHKey var key godo.Key
resource.Test(t, resource.TestCase{ resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) }, PreCheck: func() { testAccPreCheck(t) },
@ -35,15 +35,20 @@ func TestAccDigitalOceanSSHKey_Basic(t *testing.T) {
} }
func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error { func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*digitalocean.Client) client := testAccProvider.Meta().(*godo.Client)
for _, rs := range s.RootModule().Resources { for _, rs := range s.RootModule().Resources {
if rs.Type != "digitalocean_ssh_key" { if rs.Type != "digitalocean_ssh_key" {
continue continue
} }
id, err := strconv.Atoi(rs.Primary.ID)
if err != nil {
return err
}
// Try to find the key // Try to find the key
_, err := client.RetrieveSSHKey(rs.Primary.ID) _, _, err = client.Keys.GetByID(id)
if err == nil { if err == nil {
fmt.Errorf("SSH key still exists") fmt.Errorf("SSH key still exists")
@ -53,7 +58,7 @@ func testAccCheckDigitalOceanSSHKeyDestroy(s *terraform.State) error {
return nil return nil
} }
func testAccCheckDigitalOceanSSHKeyAttributes(key *digitalocean.SSHKey) resource.TestCheckFunc { func testAccCheckDigitalOceanSSHKeyAttributes(key *godo.Key) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
if key.Name != "foobar" { if key.Name != "foobar" {
@ -64,7 +69,7 @@ func testAccCheckDigitalOceanSSHKeyAttributes(key *digitalocean.SSHKey) resource
} }
} }
func testAccCheckDigitalOceanSSHKeyExists(n string, key *digitalocean.SSHKey) resource.TestCheckFunc { func testAccCheckDigitalOceanSSHKeyExists(n string, key *godo.Key) resource.TestCheckFunc {
return func(s *terraform.State) error { return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n] rs, ok := s.RootModule().Resources[n]
@ -76,19 +81,25 @@ func testAccCheckDigitalOceanSSHKeyExists(n string, key *digitalocean.SSHKey) re
return fmt.Errorf("No Record ID is set") return fmt.Errorf("No Record ID is set")
} }
client := testAccProvider.Meta().(*digitalocean.Client) client := testAccProvider.Meta().(*godo.Client)
foundKey, err := client.RetrieveSSHKey(rs.Primary.ID) id, err := strconv.Atoi(rs.Primary.ID)
if err != nil {
return err
}
// Try to find the key
foundKey, _, err := client.Keys.GetByID(id)
if err != nil { if err != nil {
return err return err
} }
if strconv.Itoa(int(foundKey.Id)) != rs.Primary.ID { if strconv.Itoa(foundKey.ID) != rs.Primary.ID {
return fmt.Errorf("Record not found") return fmt.Errorf("Record not found")
} }
*key = foundKey *key = *foundKey
return nil return nil
} }


@ -2,8 +2,10 @@ package dme
import ( import (
"fmt" "fmt"
"github.com/soniah/dnsmadeeasy"
"log" "log"
"github.com/hashicorp/go-cleanhttp"
"github.com/soniah/dnsmadeeasy"
) )
// Config contains DNSMadeEasy provider settings // Config contains DNSMadeEasy provider settings
@ -20,6 +22,8 @@ func (c *Config) Client() (*dnsmadeeasy.Client, error) {
return nil, fmt.Errorf("Error setting up client: %s", err) return nil, fmt.Errorf("Error setting up client: %s", err)
} }
client.HTTP = cleanhttp.DefaultClient()
if c.UseSandbox { if c.UseSandbox {
client.URL = dnsmadeeasy.SandboxURL client.URL = dnsmadeeasy.SandboxURL
} }
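Several providers in this commit adopt hashicorp/go-cleanhttp, in line with the core fix to stop sharing http.DefaultClient. A minimal standalone illustration, assuming only go-cleanhttp's DefaultClient constructor:

package main

import (
	"fmt"

	"github.com/hashicorp/go-cleanhttp"
)

func main() {
	// Each call returns a fresh *http.Client with its own Transport,
	// so per-provider settings cannot leak through a shared global.
	a := cleanhttp.DefaultClient()
	b := cleanhttp.DefaultClient()
	fmt.Println(a != b, a.Transport != b.Transport) // true true
}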


@ -10,8 +10,7 @@ import (
"runtime" "runtime"
"strings" "strings"
// TODO(dcunnin): Use version code from version.go "github.com/hashicorp/terraform/terraform"
// "github.com/hashicorp/terraform"
"golang.org/x/oauth2" "golang.org/x/oauth2"
"golang.org/x/oauth2/google" "golang.org/x/oauth2/google"
"golang.org/x/oauth2/jwt" "golang.org/x/oauth2/jwt"
@ -36,6 +35,13 @@ type Config struct {
func (c *Config) loadAndValidate() error { func (c *Config) loadAndValidate() error {
var account accountFile var account accountFile
clientScopes := []string{
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
"https://www.googleapis.com/auth/devstorage.full_control",
}
if c.AccountFile == "" { if c.AccountFile == "" {
c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE") c.AccountFile = os.Getenv("GOOGLE_ACCOUNT_FILE")
@ -79,13 +85,6 @@ func (c *Config) loadAndValidate() error {
} }
} }
clientScopes := []string{
"https://www.googleapis.com/auth/compute",
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/ndev.clouddns.readwrite",
"https://www.googleapis.com/auth/devstorage.full_control",
}
// Get the token for use in our requests // Get the token for use in our requests
log.Printf("[INFO] Requesting Google token...") log.Printf("[INFO] Requesting Google token...")
log.Printf("[INFO] -- Email: %s", account.ClientEmail) log.Printf("[INFO] -- Email: %s", account.ClientEmail)
@ -105,25 +104,19 @@ func (c *Config) loadAndValidate() error {
client = conf.Client(oauth2.NoContext) client = conf.Client(oauth2.NoContext)
} else { } else {
log.Printf("[INFO] Requesting Google token via GCE Service Role...") log.Printf("[INFO] Authenticating using DefaultClient");
client = &http.Client{ err := error(nil)
Transport: &oauth2.Transport{ client, err = google.DefaultClient(oauth2.NoContext, clientScopes...)
// Fetch from Google Compute Engine's metadata server to retrieve if err != nil {
// an access token for the provided account. return err
// If no account is specified, "default" is used. }
Source: google.ComputeTokenSource(""),
},
} }
versionString := terraform.Version
prerelease := terraform.VersionPrerelease
if len(prerelease) > 0 {
versionString = fmt.Sprintf("%s-%s", versionString, prerelease)
} }
// Build UserAgent
versionString := "0.0.0"
// TODO(dcunnin): Use Terraform's version code from version.go
// versionString := main.Version
// if main.VersionPrerelease != "" {
// versionString = fmt.Sprintf("%s-%s", versionString, main.VersionPrerelease)
// }
userAgent := fmt.Sprintf( userAgent := fmt.Sprintf(
"(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString) "(%s %s) Terraform/%s", runtime.GOOS, runtime.GOARCH, versionString)


@ -15,7 +15,7 @@ func Provider() terraform.ResourceProvider {
Schema: map[string]*schema.Schema{ Schema: map[string]*schema.Schema{
"account_file": &schema.Schema{ "account_file": &schema.Schema{
Type: schema.TypeString, Type: schema.TypeString,
Required: true, Optional: true,
DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil), DefaultFunc: schema.EnvDefaultFunc("GOOGLE_ACCOUNT_FILE", nil),
ValidateFunc: validateAccountFile, ValidateFunc: validateAccountFile,
}, },
@ -78,6 +78,10 @@ func providerConfigure(d *schema.ResourceData) (interface{}, error) {
} }
func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) { func validateAccountFile(v interface{}, k string) (warnings []string, errors []error) {
if v == nil {
return
}
value := v.(string) value := v.(string)
if value == "" { if value == "" {

View File

@ -231,6 +231,29 @@ func resourceComputeInstance() *schema.Resource {
}, },
}, },
"scheduling": &schema.Schema{
Type: schema.TypeList,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"on_host_maintenance": &schema.Schema{
Type: schema.TypeString,
Optional: true,
},
"automatic_restart": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
"preemptible": &schema.Schema{
Type: schema.TypeBool,
Optional: true,
},
},
},
},
"tags": &schema.Schema{ "tags": &schema.Schema{
Type: schema.TypeSet, Type: schema.TypeSet,
Optional: true, Optional: true,
@ -466,6 +489,21 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
serviceAccounts = append(serviceAccounts, serviceAccount) serviceAccounts = append(serviceAccounts, serviceAccount)
} }
prefix := "scheduling.0"
scheduling := &compute.Scheduling{}
if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
scheduling.AutomaticRestart = val.(bool)
}
if val, ok := d.GetOk(prefix + ".preemptible"); ok {
scheduling.Preemptible = val.(bool)
}
if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok {
scheduling.OnHostMaintenance = val.(string)
}
metadata, err := resourceInstanceMetadata(d) metadata, err := resourceInstanceMetadata(d)
if err != nil { if err != nil {
return fmt.Errorf("Error creating metadata: %s", err) return fmt.Errorf("Error creating metadata: %s", err)
@ -482,6 +520,7 @@ func resourceComputeInstanceCreate(d *schema.ResourceData, meta interface{}) err
NetworkInterfaces: networkInterfaces, NetworkInterfaces: networkInterfaces,
Tags: resourceInstanceTags(d), Tags: resourceInstanceTags(d),
ServiceAccounts: serviceAccounts, ServiceAccounts: serviceAccounts,
Scheduling: scheduling,
} }
log.Printf("[INFO] Requesting instance creation") log.Printf("[INFO] Requesting instance creation")
@ -720,6 +759,38 @@ func resourceComputeInstanceUpdate(d *schema.ResourceData, meta interface{}) err
d.SetPartial("tags") d.SetPartial("tags")
} }
if d.HasChange("scheduling") {
prefix := "scheduling.0"
scheduling := &compute.Scheduling{}
if val, ok := d.GetOk(prefix + ".automatic_restart"); ok {
scheduling.AutomaticRestart = val.(bool)
}
if val, ok := d.GetOk(prefix + ".preemptible"); ok {
scheduling.Preemptible = val.(bool)
}
if val, ok := d.GetOk(prefix + ".on_host_maintenance"); ok {
scheduling.OnHostMaintenance = val.(string)
}
op, err := config.clientCompute.Instances.SetScheduling(config.Project,
zone, d.Id(), scheduling).Do()
if err != nil {
return fmt.Errorf("Error updating scheduling policy: %s", err)
}
opErr := computeOperationWaitZone(config, op, zone,
"scheduling policy update")
if opErr != nil {
return opErr
}
d.SetPartial("scheduling");
}
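Create and Update now build compute.Scheduling from identical GetOk blocks. A hypothetical consolidation (not in this commit) that keeps the same semantics:

func schedulingFromConfig(d *schema.ResourceData) *compute.Scheduling {
	// Decode the single "scheduling" list element; fields left unset
	// in config keep their Go zero values.
	prefix := "scheduling.0"
	s := &compute.Scheduling{}
	if v, ok := d.GetOk(prefix + ".automatic_restart"); ok {
		s.AutomaticRestart = v.(bool)
	}
	if v, ok := d.GetOk(prefix + ".preemptible"); ok {
		s.Preemptible = v.(bool)
	}
	if v, ok := d.GetOk(prefix + ".on_host_maintenance"); ok {
		s.OnHostMaintenance = v.(string)
	}
	return s
}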
networkInterfacesCount := d.Get("network_interface.#").(int) networkInterfacesCount := d.Get("network_interface.#").(int)
if networkInterfacesCount > 0 { if networkInterfacesCount > 0 {
// Sanity check // Sanity check


@ -3,6 +3,7 @@ package google
import ( import (
"fmt" "fmt"
"log" "log"
"strings"
"google.golang.org/api/compute/v1" "google.golang.org/api/compute/v1"
"google.golang.org/api/googleapi" "google.golang.org/api/googleapi"
@ -247,10 +248,32 @@ func resourceComputeInstanceGroupManagerDelete(d *schema.ResourceData, meta inte
return fmt.Errorf("Error deleting instance group manager: %s", err) return fmt.Errorf("Error deleting instance group manager: %s", err)
} }
currentSize := int64(d.Get("target_size").(int))
// Wait for the operation to complete // Wait for the operation to complete
err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager") err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager")
for err != nil && currentSize > 0 {
if !strings.Contains(err.Error(), "timeout") {
return err
}
instanceGroup, err := config.clientCompute.InstanceGroups.Get(
config.Project, d.Get("zone").(string), d.Id()).Do()
if err != nil { if err != nil {
return err return fmt.Errorf("Error getting instance group size: %s", err);
}
if instanceGroup.Size >= currentSize {
return fmt.Errorf("Error, instance group isn't shrinking during delete")
}
log.Printf("[INFO] timeout occured, but instance group is shrinking (%d < %d)", instanceGroup.Size, currentSize)
currentSize = instanceGroup.Size
err = computeOperationWaitZone(config, op, d.Get("zone").(string), "Deleting InstanceGroupManager")
} }
d.SetId("") d.SetId("")


@ -272,6 +272,25 @@ func TestAccComputeInstance_service_account(t *testing.T) {
}) })
} }
func TestAccComputeInstance_scheduling(t *testing.T) {
var instance compute.Instance
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckComputeInstanceDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccComputeInstance_scheduling,
Check: resource.ComposeTestCheckFunc(
testAccCheckComputeInstanceExists(
"google_compute_instance.foobar", &instance),
),
},
},
})
}
func testAccCheckComputeInstanceDestroy(s *terraform.State) error { func testAccCheckComputeInstanceDestroy(s *terraform.State) error {
config := testAccProvider.Meta().(*Config) config := testAccProvider.Meta().(*Config)
@ -672,3 +691,21 @@ resource "google_compute_instance" "foobar" {
] ]
} }
}` }`
const testAccComputeInstance_scheduling = `
resource "google_compute_instance" "foobar" {
name = "terraform-test"
machine_type = "n1-standard-1"
zone = "us-central1-a"
disk {
image = "debian-7-wheezy-v20140814"
}
network_interface {
network = "default"
}
scheduling {
}
}`
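The test config above exercises an empty scheduling block; a populated variant (assumed values, not part of this commit) would cover the new fields, noting that GCE requires preemptible instances to disable automatic restart and use TERMINATE maintenance:

const testAccComputeInstance_schedulingPreemptible = `
resource "google_compute_instance" "foobar" {
	name = "terraform-test-sched"
	machine_type = "n1-standard-1"
	zone = "us-central1-a"

	disk {
		image = "debian-7-wheezy-v20140814"
	}

	network_interface {
		network = "default"
	}

	scheduling {
		preemptible = true
		automatic_restart = false
		on_host_maintenance = "TERMINATE"
	}
}`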


@ -113,7 +113,7 @@ resource "google_container_cluster" "with_node_config" {
} }
node_config { node_config {
machine_type = "f1-micro" machine_type = "g1-small"
disk_size_gb = 15 disk_size_gb = 15
oauth_scopes = [ oauth_scopes = [
"https://www.googleapis.com/auth/compute", "https://www.googleapis.com/auth/compute",


@ -27,8 +27,6 @@ func TestAccStorage_basic(t *testing.T) {
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists( testAccCheckCloudStorageBucketExists(
"google_storage_bucket.bucket", &bucketName), "google_storage_bucket.bucket", &bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "predefined_acl", "projectPrivate"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "US"), "google_storage_bucket.bucket", "location", "US"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -52,8 +50,6 @@ func TestAccStorageCustomAttributes(t *testing.T) {
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists( testAccCheckCloudStorageBucketExists(
"google_storage_bucket.bucket", &bucketName), "google_storage_bucket.bucket", &bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "predefined_acl", "publicReadWrite"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "EU"), "google_storage_bucket.bucket", "location", "EU"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
@ -77,8 +73,6 @@ func TestAccStorageBucketUpdate(t *testing.T) {
Check: resource.ComposeTestCheckFunc( Check: resource.ComposeTestCheckFunc(
testAccCheckCloudStorageBucketExists( testAccCheckCloudStorageBucketExists(
"google_storage_bucket.bucket", &bucketName), "google_storage_bucket.bucket", &bucketName),
resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "predefined_acl", "projectPrivate"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(
"google_storage_bucket.bucket", "location", "US"), "google_storage_bucket.bucket", "location", "US"),
resource.TestCheckResourceAttr( resource.TestCheckResourceAttr(


@ -5,16 +5,20 @@ func canonicalizeServiceScope(scope string) string {
// to the GCE auth endpoints they alias to. // to the GCE auth endpoints they alias to.
scopeMap := map[string]string{ scopeMap := map[string]string{
"bigquery": "https://www.googleapis.com/auth/bigquery", "bigquery": "https://www.googleapis.com/auth/bigquery",
"cloud-platform": "https://www.googleapis.com/auth/cloud-platform",
"compute-ro": "https://www.googleapis.com/auth/compute.readonly", "compute-ro": "https://www.googleapis.com/auth/compute.readonly",
"compute-rw": "https://www.googleapis.com/auth/compute", "compute-rw": "https://www.googleapis.com/auth/compute",
"datastore": "https://www.googleapis.com/auth/datastore", "datastore": "https://www.googleapis.com/auth/datastore",
"logging-write": "https://www.googleapis.com/auth/logging.write", "logging-write": "https://www.googleapis.com/auth/logging.write",
"monitoring": "https://www.googleapis.com/auth/monitoring",
"sql": "https://www.googleapis.com/auth/sqlservice", "sql": "https://www.googleapis.com/auth/sqlservice",
"sql-admin": "https://www.googleapis.com/auth/sqlservice.admin", "sql-admin": "https://www.googleapis.com/auth/sqlservice.admin",
"storage-full": "https://www.googleapis.com/auth/devstorage.full_control", "storage-full": "https://www.googleapis.com/auth/devstorage.full_control",
"storage-ro": "https://www.googleapis.com/auth/devstorage.read_only", "storage-ro": "https://www.googleapis.com/auth/devstorage.read_only",
"storage-rw": "https://www.googleapis.com/auth/devstorage.read_write", "storage-rw": "https://www.googleapis.com/auth/devstorage.read_write",
"taskqueue": "https://www.googleapis.com/auth/taskqueue", "taskqueue": "https://www.googleapis.com/auth/taskqueue",
"useraccounts-ro": "https://www.googleapis.com/auth/cloud.useraccounts.readonly",
"useraccounts-rw": "https://www.googleapis.com/auth/cloud.useraccounts",
"userinfo-email": "https://www.googleapis.com/auth/userinfo.email", "userinfo-email": "https://www.googleapis.com/auth/userinfo.email",
} }


@ -18,7 +18,13 @@ func resource() *schema.Resource {
Read: resourceRead, Read: resourceRead,
Delete: resourceDelete, Delete: resourceDelete,
Schema: map[string]*schema.Schema{}, Schema: map[string]*schema.Schema{
"triggers": &schema.Schema{
Type: schema.TypeMap,
Optional: true,
ForceNew: true,
},
},
} }
} }
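With a real schema, the resource becomes replaceable on demand: any change to a value in the triggers map forces a new null_resource. A usage sketch in the repository's test-config style (assumed config, 0.6-era syntax):

const testNullResourceTriggers = `
resource "null_resource" "cluster" {
	triggers {
		cluster_instance_ids = "${join(",", aws_instance.cluster.*.id)}"
	}
}`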


@ -1,6 +1,7 @@
package packet package packet
import ( import (
"github.com/hashicorp/go-cleanhttp"
"github.com/packethost/packngo" "github.com/packethost/packngo"
) )
@ -14,5 +15,5 @@ type Config struct {
// Client() returns a new client for accessing packet. // Client() returns a new client for accessing packet.
func (c *Config) Client() *packngo.Client { func (c *Config) Client() *packngo.Client {
return packngo.NewClient(consumerToken, c.AuthToken) return packngo.NewClient(consumerToken, c.AuthToken, cleanhttp.DefaultClient())
} }


@ -180,7 +180,7 @@ func (r *ResourceProvisioner) Validate(c *terraform.ResourceConfig) (ws []string
if p.NodeName == "" { if p.NodeName == "" {
es = append(es, fmt.Errorf("Key not found: node_name")) es = append(es, fmt.Errorf("Key not found: node_name"))
} }
if p.RunList == nil { if !p.UsePolicyfile && p.RunList == nil {
es = append(es, fmt.Errorf("Key not found: run_list")) es = append(es, fmt.Errorf("Key not found: run_list"))
} }
if p.ServerURL == "" { if p.ServerURL == "" {


@ -7,8 +7,8 @@ import (
"sort" "sort"
"strings" "strings"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/go-multierror" "github.com/hashicorp/go-multierror"
"github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
@ -76,7 +76,7 @@ func (c *ApplyCommand) Run(args []string) int {
if !c.Destroy && maybeInit { if !c.Destroy && maybeInit {
// Do a detect to determine if we need to do an init + apply. // Do a detect to determine if we need to do an init + apply.
if detected, err := module.Detect(configPath, pwd); err != nil { if detected, err := getter.Detect(configPath, pwd, getter.Detectors); err != nil {
c.Ui.Error(fmt.Sprintf( c.Ui.Error(fmt.Sprintf(
"Invalid path: %s", err)) "Invalid path: %s", err))
return 1 return 1


@ -7,6 +7,7 @@ import (
"strings" "strings"
"testing" "testing"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
) )
@ -73,7 +74,7 @@ func testModule(t *testing.T, name string) *module.Tree {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }
s := &module.FolderStorage{StorageDir: tempDir(t)} s := &getter.FolderStorage{StorageDir: tempDir(t)}
if err := mod.Load(s, module.GetModeGet); err != nil { if err := mod.Load(s, module.GetModeGet); err != nil {
t.Fatalf("err: %s", err) t.Fatalf("err: %s", err)
} }


@ -131,7 +131,7 @@ func formatPlanModuleExpand(
newResource := "" newResource := ""
if attrDiff.RequiresNew && rdiff.Destroy { if attrDiff.RequiresNew && rdiff.Destroy {
newResource = " (forces new resource)" newResource = opts.Color.Color(" [red](forces new resource)")
} }
buf.WriteString(fmt.Sprintf( buf.WriteString(fmt.Sprintf(


@ -6,6 +6,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -75,7 +76,7 @@ func (c *InitCommand) Run(args []string) int {
} }
// Detect // Detect
source, err = module.Detect(source, pwd) source, err = getter.Detect(source, pwd, getter.Detectors)
if err != nil { if err != nil {
c.Ui.Error(fmt.Sprintf( c.Ui.Error(fmt.Sprintf(
"Error with module source: %s", err)) "Error with module source: %s", err))


@ -9,6 +9,7 @@ import (
"path/filepath" "path/filepath"
"strconv" "strconv"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config/module" "github.com/hashicorp/terraform/config/module"
"github.com/hashicorp/terraform/state" "github.com/hashicorp/terraform/state"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
@ -330,9 +331,9 @@ func (m *Meta) flagSet(n string) *flag.FlagSet {
// moduleStorage returns the module.Storage implementation used to store // moduleStorage returns the module.Storage implementation used to store
// modules for commands. // modules for commands.
func (m *Meta) moduleStorage(root string) module.Storage { func (m *Meta) moduleStorage(root string) getter.Storage {
return &uiModuleStorage{ return &uiModuleStorage{
Storage: &module.FolderStorage{ Storage: &getter.FolderStorage{
StorageDir: filepath.Join(root, "modules"), StorageDir: filepath.Join(root, "modules"),
}, },
Ui: m.Ui, Ui: m.Ui,


@ -3,14 +3,14 @@ package command
import ( import (
"fmt" "fmt"
"github.com/hashicorp/terraform/config/module" "github.com/hashicorp/go-getter"
"github.com/mitchellh/cli" "github.com/mitchellh/cli"
) )
// uiModuleStorage implements getter.Storage and is just a proxy to output // to the UI any Get operations.
// to the UI any Get operations. // to the UI any Get operations.
type uiModuleStorage struct { type uiModuleStorage struct {
Storage module.Storage Storage getter.Storage
Ui cli.Ui Ui cli.Ui
} }


@ -3,9 +3,9 @@ package command
import ( import (
"testing" "testing"
"github.com/hashicorp/terraform/config/module" "github.com/hashicorp/go-getter"
) )
func TestUiModuleStorage_impl(t *testing.T) { func TestUiModuleStorage_impl(t *testing.T) {
var _ module.Storage = new(uiModuleStorage) var _ getter.Storage = new(uiModuleStorage)
} }


@ -338,7 +338,7 @@ func (c *RemoteConfigCommand) enableRemoteState() int {
func (c *RemoteConfigCommand) Help() string { func (c *RemoteConfigCommand) Help() string {
helpText := ` helpText := `
Usage: terraform remote [options] Usage: terraform remote config [options]
Configures Terraform to use a remote state server. This allows state Configures Terraform to use a remote state server. This allows state
to be pulled down when necessary and then pushed to the server when to be pulled down when necessary and then pushed to the server when
@ -348,7 +348,8 @@ Usage: terraform remote [options]
Options: Options:
-backend=Atlas Specifies the type of remote backend. Must be one -backend=Atlas Specifies the type of remote backend. Must be one
of Atlas, Consul, or HTTP. Defaults to Atlas. of Atlas, Consul, Etcd, HTTP, S3, or Swift. Defaults
to Atlas.
-backend-config="k=v" Specifies configuration for the remote storage -backend-config="k=v" Specifies configuration for the remote storage
backend. This can be specified multiple times. backend. This can be specified multiple times.


@ -12,7 +12,7 @@ import (
"github.com/hashicorp/hcl" "github.com/hashicorp/hcl"
"github.com/hashicorp/terraform/plugin" "github.com/hashicorp/terraform/plugin"
"github.com/hashicorp/terraform/terraform" "github.com/hashicorp/terraform/terraform"
"github.com/mitchellh/osext" "github.com/kardianos/osext"
) )
// Config is the structure of the configuration for the Terraform CLI. // Config is the structure of the configuration for the Terraform CLI.


@ -6,11 +6,13 @@ import (
"errors" "errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"net"
"regexp" "regexp"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"github.com/apparentlymart/go-cidr/cidr"
"github.com/hashicorp/terraform/config/lang/ast" "github.com/hashicorp/terraform/config/lang/ast"
"github.com/mitchellh/go-homedir" "github.com/mitchellh/go-homedir"
) )
@ -20,6 +22,9 @@ var Funcs map[string]ast.Function
func init() { func init() {
Funcs = map[string]ast.Function{ Funcs = map[string]ast.Function{
"cidrhost": interpolationFuncCidrHost(),
"cidrnetmask": interpolationFuncCidrNetmask(),
"cidrsubnet": interpolationFuncCidrSubnet(),
"compact": interpolationFuncCompact(), "compact": interpolationFuncCompact(),
"concat": interpolationFuncConcat(), "concat": interpolationFuncConcat(),
"element": interpolationFuncElement(), "element": interpolationFuncElement(),
@ -29,10 +34,12 @@ func init() {
"index": interpolationFuncIndex(), "index": interpolationFuncIndex(),
"join": interpolationFuncJoin(), "join": interpolationFuncJoin(),
"length": interpolationFuncLength(), "length": interpolationFuncLength(),
"lower": interpolationFuncLower(),
"replace": interpolationFuncReplace(), "replace": interpolationFuncReplace(),
"split": interpolationFuncSplit(), "split": interpolationFuncSplit(),
"base64encode": interpolationFuncBase64Encode(), "base64encode": interpolationFuncBase64Encode(),
"base64decode": interpolationFuncBase64Decode(), "base64decode": interpolationFuncBase64Decode(),
"upper": interpolationFuncUpper(),
} }
} }
@ -52,6 +59,92 @@ func interpolationFuncCompact() ast.Function {
} }
} }
// interpolationFuncCidrHost implements the "cidrhost" function that
// fills in the host part of a CIDR range address to create a single
// host address
func interpolationFuncCidrHost() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{
ast.TypeString, // starting CIDR mask
ast.TypeInt, // host number to insert
},
ReturnType: ast.TypeString,
Variadic: false,
Callback: func(args []interface{}) (interface{}, error) {
hostNum := args[1].(int)
_, network, err := net.ParseCIDR(args[0].(string))
if err != nil {
return nil, fmt.Errorf("invalid CIDR expression: %s", err)
}
ip, err := cidr.Host(network, hostNum)
if err != nil {
return nil, err
}
return ip.String(), nil
},
}
}
// interpolationFuncCidrNetmask implements the "cidrnetmask" function
// that returns the subnet mask in IP address notation.
func interpolationFuncCidrNetmask() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{
ast.TypeString, // CIDR mask
},
ReturnType: ast.TypeString,
Variadic: false,
Callback: func(args []interface{}) (interface{}, error) {
_, network, err := net.ParseCIDR(args[0].(string))
if err != nil {
return nil, fmt.Errorf("invalid CIDR expression: %s", err)
}
return net.IP(network.Mask).String(), nil
},
}
}
// interpolationFuncCidrSubnet implements the "cidrsubnet" function that
// adds an additional subnet of the given length onto an existing
// IP block expressed in CIDR notation.
func interpolationFuncCidrSubnet() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{
ast.TypeString, // starting CIDR mask
ast.TypeInt, // number of bits to extend the prefix
ast.TypeInt, // network number to append to the prefix
},
ReturnType: ast.TypeString,
Variadic: false,
Callback: func(args []interface{}) (interface{}, error) {
extraBits := args[1].(int)
subnetNum := args[2].(int)
_, network, err := net.ParseCIDR(args[0].(string))
if err != nil {
return nil, fmt.Errorf("invalid CIDR expression: %s", err)
}
// For portability with 32-bit systems where the subnet number
// will be a 32-bit int, we only allow extension of 32 bits in
// one call even if we're running on a 64-bit machine.
// (Of course, this is significant only for IPv6.)
if extraBits > 32 {
return nil, fmt.Errorf("may not extend prefix by more than 32 bits")
}
newNetwork, err := cidr.Subnet(network, extraBits, subnetNum)
if err != nil {
return nil, err
}
return newNetwork.String(), nil
},
}
}
// interpolationFuncConcat implements the "concat" function that // interpolationFuncConcat implements the "concat" function that
// concatenates multiple strings. This isn't actually necessary anymore // concatenates multiple strings. This isn't actually necessary anymore
// since our language supports string concat natively, but for backwards // since our language supports string concat natively, but for backwards
@ -442,3 +535,29 @@ func interpolationFuncBase64Decode() ast.Function {
}, },
} }
} }
// interpolationFuncLower implements the "lower" function that does
// string lower casing.
func interpolationFuncLower() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
toLower := args[0].(string)
return strings.ToLower(toLower), nil
},
}
}
// interpolationFuncUpper implements the "upper" function that does
// string upper casing.
func interpolationFuncUpper() ast.Function {
return ast.Function{
ArgTypes: []ast.Type{ast.TypeString},
ReturnType: ast.TypeString,
Callback: func(args []interface{}) (interface{}, error) {
toUpper := args[0].(string)
return strings.ToUpper(toUpper), nil
},
}
}


@ -38,6 +38,115 @@ func TestInterpolateFuncCompact(t *testing.T) {
}) })
} }
func TestInterpolateFuncCidrHost(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${cidrhost("192.168.1.0/24", 5)}`,
"192.168.1.5",
false,
},
{
`${cidrhost("192.168.1.0/30", 255)}`,
nil,
true, // 255 doesn't fit in two bits
},
{
`${cidrhost("not-a-cidr", 6)}`,
nil,
true, // not a valid CIDR mask
},
{
`${cidrhost("10.256.0.0/8", 6)}`,
nil,
true, // can't have an octet >255
},
},
})
}
func TestInterpolateFuncCidrNetmask(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${cidrnetmask("192.168.1.0/24")}`,
"255.255.255.0",
false,
},
{
`${cidrnetmask("192.168.1.0/32")}`,
"255.255.255.255",
false,
},
{
`${cidrnetmask("0.0.0.0/0")}`,
"0.0.0.0",
false,
},
{
// This doesn't really make sense for IPv6 networks
// but it ought to do something sensible anyway.
`${cidrnetmask("1::/64")}`,
"ffff:ffff:ffff:ffff::",
false,
},
{
`${cidrnetmask("not-a-cidr")}`,
nil,
true, // not a valid CIDR mask
},
{
`${cidrnetmask("10.256.0.0/8")}`,
nil,
true, // can't have an octet >255
},
},
})
}
func TestInterpolateFuncCidrSubnet(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${cidrsubnet("192.168.2.0/20", 4, 6)}`,
"192.168.6.0/24",
false,
},
{
`${cidrsubnet("fe80::/48", 16, 6)}`,
"fe80:0:0:6::/64",
false,
},
{
// IPv4 address encoded in IPv6 syntax gets normalized
`${cidrsubnet("::ffff:192.168.0.0/112", 8, 6)}`,
"192.168.6.0/24",
false,
},
{
`${cidrsubnet("192.168.0.0/30", 4, 6)}`,
nil,
true, // not enough bits left
},
{
`${cidrsubnet("192.168.0.0/16", 2, 16)}`,
nil,
true, // can't encode 16 in 2 bits
},
{
`${cidrsubnet("not-a-cidr", 4, 6)}`,
nil,
true, // not a valid CIDR mask
},
{
`${cidrsubnet("10.256.0.0/8", 4, 6)}`,
nil,
true, // can't have an octet >255
},
},
})
}
func TestInterpolateFuncDeprecatedConcat(t *testing.T) { func TestInterpolateFuncDeprecatedConcat(t *testing.T) {
testFunction(t, testFunctionConfig{ testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{ Cases: []testFunctionCase{
@ -644,6 +753,54 @@ func TestInterpolateFuncBase64Decode(t *testing.T) {
}) })
} }
func TestInterpolateFuncLower(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${lower("HELLO")}`,
"hello",
false,
},
{
`${lower("")}`,
"",
false,
},
{
`${lower()}`,
nil,
true,
},
},
})
}
func TestInterpolateFuncUpper(t *testing.T) {
testFunction(t, testFunctionConfig{
Cases: []testFunctionCase{
{
`${upper("hello")}`,
"HELLO",
false,
},
{
`${upper("")}`,
"",
false,
},
{
`${upper()}`,
nil,
true,
},
},
})
}
type testFunctionConfig struct { type testFunctionConfig struct {
Cases []testFunctionCase Cases []testFunctionCase
Vars map[string]ast.Variable Vars map[string]ast.Variable


@ -30,7 +30,10 @@ const INTEGER = 57355
const FLOAT = 57356 const FLOAT = 57356
const STRING = 57357 const STRING = 57357
var parserToknames = []string{ var parserToknames = [...]string{
"$end",
"error",
"$unk",
"PROGRAM_BRACKET_LEFT", "PROGRAM_BRACKET_LEFT",
"PROGRAM_BRACKET_RIGHT", "PROGRAM_BRACKET_RIGHT",
"PROGRAM_STRING_START", "PROGRAM_STRING_START",
@ -44,7 +47,7 @@ var parserToknames = []string{
"FLOAT", "FLOAT",
"STRING", "STRING",
} }
var parserStatenames = []string{} var parserStatenames = [...]string{}
const parserEofCode = 1 const parserEofCode = 1
const parserErrCode = 2 const parserErrCode = 2
@ -53,7 +56,7 @@ const parserMaxDepth = 200
//line lang.y:165 //line lang.y:165
//line yacctab:1 //line yacctab:1
var parserExca = []int{ var parserExca = [...]int{
-1, 1, -1, 1,
1, -1, 1, -1,
-2, 0, -2, 0,
@ -67,75 +70,103 @@ var parserStates []string
const parserLast = 30 const parserLast = 30
var parserAct = []int{ var parserAct = [...]int{
9, 20, 16, 16, 7, 7, 3, 18, 10, 8, 9, 20, 16, 16, 7, 7, 3, 18, 10, 8,
1, 17, 14, 12, 13, 6, 6, 19, 8, 22, 1, 17, 14, 12, 13, 6, 6, 19, 8, 22,
15, 23, 24, 11, 2, 25, 16, 21, 4, 5, 15, 23, 24, 11, 2, 25, 16, 21, 4, 5,
} }
var parserPact = []int{ var parserPact = [...]int{
1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15, 1, -1000, 1, -1000, -1000, -1000, -1000, 0, -1000, 15,
0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000, 0, 1, -1000, -1000, -1, -1000, 0, -8, 0, -1000,
-1000, 12, -9, -1000, 0, -9, -1000, 12, -9, -1000, 0, -9,
} }
var parserPgo = []int{ var parserPgo = [...]int{
0, 0, 29, 28, 23, 6, 27, 10, 0, 0, 29, 28, 23, 6, 27, 10,
} }
var parserR1 = []int{ var parserR1 = [...]int{
0, 7, 7, 4, 4, 5, 5, 2, 1, 1, 0, 7, 7, 4, 4, 5, 5, 2, 1, 1,
1, 1, 1, 1, 1, 6, 6, 6, 3, 1, 1, 1, 1, 1, 6, 6, 6, 3,
} }
var parserR2 = []int{ var parserR2 = [...]int{
0, 0, 1, 1, 2, 1, 1, 3, 3, 1, 0, 0, 1, 1, 2, 1, 1, 3, 3, 1,
1, 1, 3, 1, 4, 0, 3, 1, 1, 1, 1, 3, 1, 4, 0, 3, 1, 1,
} }
var parserChk = []int{ var parserChk = [...]int{
-1000, -7, -4, -5, -3, -2, 15, 4, -5, -1, -1000, -7, -4, -5, -3, -2, 15, 4, -5, -1,
8, -4, 13, 14, 12, 5, 11, -1, 8, -1, 8, -4, 13, 14, 12, 5, 11, -1, 8, -1,
9, -6, -1, 9, 10, -1, 9, -6, -1, 9, 10, -1,
} }
var parserDef = []int{ var parserDef = [...]int{
1, -2, 2, 3, 5, 6, 18, 0, 4, 0, 1, -2, 2, 3, 5, 6, 18, 0, 4, 0,
0, 9, 10, 11, 13, 7, 0, 0, 15, 12, 0, 9, 10, 11, 13, 7, 0, 0, 15, 12,
8, 0, 17, 14, 0, 16, 8, 0, 17, 14, 0, 16,
} }
var parserTok1 = []int{ var parserTok1 = [...]int{
1, 1,
} }
var parserTok2 = []int{ var parserTok2 = [...]int{
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
12, 13, 14, 15, 12, 13, 14, 15,
} }
var parserTok3 = []int{ var parserTok3 = [...]int{
0, 0,
} }
var parserErrorMessages = [...]struct {
state int
token int
msg string
}{}
//line yaccpar:1 //line yaccpar:1
/* parser for yacc output */ /* parser for yacc output */
var parserDebug = 0 var (
parserDebug = 0
parserErrorVerbose = false
)
type parserLexer interface { type parserLexer interface {
Lex(lval *parserSymType) int Lex(lval *parserSymType) int
Error(s string) Error(s string)
} }
type parserParser interface {
Parse(parserLexer) int
Lookahead() int
}
type parserParserImpl struct {
lookahead func() int
}
func (p *parserParserImpl) Lookahead() int {
return p.lookahead()
}
func parserNewParser() parserParser {
p := &parserParserImpl{
lookahead: func() int { return -1 },
}
return p
}
const parserFlag = -1000 const parserFlag = -1000
func parserTokname(c int) string { func parserTokname(c int) string {
// 4 is TOKSTART above if c >= 1 && c-1 < len(parserToknames) {
if c >= 4 && c-4 < len(parserToknames) { if parserToknames[c-1] != "" {
if parserToknames[c-4] != "" { return parserToknames[c-1]
return parserToknames[c-4]
} }
} }
return __yyfmt__.Sprintf("tok-%v", c) return __yyfmt__.Sprintf("tok-%v", c)
@ -150,51 +181,129 @@ func parserStatname(s int) string {
return __yyfmt__.Sprintf("state-%v", s) return __yyfmt__.Sprintf("state-%v", s)
} }
func parserlex1(lex parserLexer, lval *parserSymType) int { func parserErrorMessage(state, lookAhead int) string {
c := 0 const TOKSTART = 4
char := lex.Lex(lval)
if !parserErrorVerbose {
return "syntax error"
}
for _, e := range parserErrorMessages {
if e.state == state && e.token == lookAhead {
return "syntax error: " + e.msg
}
}
res := "syntax error: unexpected " + parserTokname(lookAhead)
// To match Bison, suggest at most four expected tokens.
expected := make([]int, 0, 4)
// Look for shiftable tokens.
base := parserPact[state]
for tok := TOKSTART; tok-1 < len(parserToknames); tok++ {
if n := base + tok; n >= 0 && n < parserLast && parserChk[parserAct[n]] == tok {
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
}
if parserDef[state] == -2 {
i := 0
for parserExca[i] != -1 || parserExca[i+1] != state {
i += 2
}
// Look for tokens that we accept or reduce.
for i += 2; parserExca[i] >= 0; i += 2 {
tok := parserExca[i]
if tok < TOKSTART || parserExca[i+1] == 0 {
continue
}
if len(expected) == cap(expected) {
return res
}
expected = append(expected, tok)
}
// If the default action is to accept or reduce, give up.
if parserExca[i+1] != 0 {
return res
}
}
for i, tok := range expected {
if i == 0 {
res += ", expecting "
} else {
res += " or "
}
res += parserTokname(tok)
}
return res
}
func parserlex1(lex parserLexer, lval *parserSymType) (char, token int) {
token = 0
char = lex.Lex(lval)
if char <= 0 { if char <= 0 {
c = parserTok1[0] token = parserTok1[0]
goto out goto out
} }
if char < len(parserTok1) { if char < len(parserTok1) {
c = parserTok1[char] token = parserTok1[char]
goto out goto out
} }
if char >= parserPrivate { if char >= parserPrivate {
if char < parserPrivate+len(parserTok2) { if char < parserPrivate+len(parserTok2) {
c = parserTok2[char-parserPrivate] token = parserTok2[char-parserPrivate]
goto out goto out
} }
} }
for i := 0; i < len(parserTok3); i += 2 { for i := 0; i < len(parserTok3); i += 2 {
c = parserTok3[i+0] token = parserTok3[i+0]
if c == char { if token == char {
c = parserTok3[i+1] token = parserTok3[i+1]
goto out goto out
} }
} }
out: out:
if c == 0 { if token == 0 {
c = parserTok2[1] /* unknown char */ token = parserTok2[1] /* unknown char */
} }
if parserDebug >= 3 { if parserDebug >= 3 {
__yyfmt__.Printf("lex %s(%d)\n", parserTokname(c), uint(char)) __yyfmt__.Printf("lex %s(%d)\n", parserTokname(token), uint(char))
} }
return c return char, token
} }
func parserParse(parserlex parserLexer) int { func parserParse(parserlex parserLexer) int {
return parserNewParser().Parse(parserlex)
}
func (parserrcvr *parserParserImpl) Parse(parserlex parserLexer) int {
var parsern int var parsern int
var parserlval parserSymType var parserlval parserSymType
var parserVAL parserSymType var parserVAL parserSymType
var parserDollar []parserSymType
_ = parserDollar // silence set and not used
parserS := make([]parserSymType, parserMaxDepth) parserS := make([]parserSymType, parserMaxDepth)
Nerrs := 0 /* number of errors */ Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */ Errflag := 0 /* error recovery flag */
parserstate := 0 parserstate := 0
parserchar := -1 parserchar := -1
parsertoken := -1 // parserchar translated into internal numbering
parserrcvr.lookahead = func() int { return parserchar }
defer func() {
// Make sure we report no lookahead when not parsing.
parserstate = -1
parserchar = -1
parsertoken = -1
}()
parserp := -1 parserp := -1
goto parserstack goto parserstack
@ -207,7 +316,7 @@ ret1:
parserstack:
/* put a state and value onto the stack */
if parserDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", parserTokname(parsertoken), parserStatname(parserstate))
}
parserp++
@ -225,15 +334,16 @@ parsernewstate:
goto parserdefault /* simple state */
}
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
parsern += parsertoken
if parsern < 0 || parsern >= parserLast {
goto parserdefault
}
parsern = parserAct[parsern]
if parserChk[parsern] == parsertoken { /* valid shift */
parserchar = -1
parsertoken = -1
parserVAL = parserlval
parserstate = parsern
if Errflag > 0 {
@ -247,7 +357,7 @@ parserdefault:
parsern = parserDef[parserstate]
if parsern == -2 {
if parserchar < 0 {
parserchar, parsertoken = parserlex1(parserlex, &parserlval)
}
/* look through exception table */
@ -260,7 +370,7 @@ parserdefault:
}
for xi += 2; ; xi += 2 {
parsern = parserExca[xi+0]
if parsern < 0 || parsern == parsertoken {
break
}
}
@ -273,11 +383,11 @@ parserdefault:
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
parserlex.Error(parserErrorMessage(parserstate, parsertoken))
Nerrs++
if parserDebug >= 1 {
__yyfmt__.Printf("%s", parserStatname(parserstate))
__yyfmt__.Printf(" saw %s\n", parserTokname(parsertoken))
}
fallthrough
@ -305,12 +415,13 @@ parserdefault:
case 3: /* no shift yet; clobber input char */
if parserDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", parserTokname(parsertoken))
}
if parsertoken == parserEofCode {
goto ret1
}
parserchar = -1
parsertoken = -1
goto parsernewstate /* try again in the same state */
}
}
@ -325,6 +436,13 @@ parserdefault:
_ = parserpt // guard against "declared and not used"
parserp -= parserR2[parsern]
// parserp is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if parserp+1 >= len(parserS) {
nyys := make([]parserSymType, len(parserS)*2)
copy(nyys, parserS)
parserS = nyys
}
parserVAL = parserS[parserp+1]
/* consult goto table to find next state */
@ -344,6 +462,7 @@ parserdefault:
switch parsernt {
case 1:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:35
{
parserResult = &ast.LiteralNode{
@ -353,9 +472,10 @@ parserdefault:
}
}
case 2:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:43
{
parserResult = parserDollar[1].node
// We want to make sure that the top value is always a Concat
// so that the return value is always a string type from an
@ -365,28 +485,30 @@ parserdefault:
// because functionally the AST is the same, but we do that because
// it makes for an easy literal check later (to check if a string
// has any interpolations).
if _, ok := parserDollar[1].node.(*ast.Concat); !ok {
if n, ok := parserDollar[1].node.(*ast.LiteralNode); !ok || n.Typex != ast.TypeString {
parserResult = &ast.Concat{
Exprs: []ast.Node{parserDollar[1].node},
Posx: parserDollar[1].node.Pos(),
}
}
}
}
case 3:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:66
{
parserVAL.node = parserDollar[1].node
}
case 4:
parserDollar = parserS[parserpt-2 : parserpt+1]
//line lang.y:70
{
var result []ast.Node
if c, ok := parserDollar[1].node.(*ast.Concat); ok {
result = append(c.Exprs, parserDollar[2].node)
} else {
result = []ast.Node{parserDollar[1].node, parserDollar[2].node}
}
parserVAL.node = &ast.Concat{
@ -395,89 +517,103 @@ parserdefault:
}
}
case 5:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:86
{
parserVAL.node = parserDollar[1].node
}
case 6:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:90
{
parserVAL.node = parserDollar[1].node
}
case 7:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:96
{
parserVAL.node = parserDollar[2].node
}
case 8:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:102
{
parserVAL.node = parserDollar[2].node
}
case 9:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:106
{
parserVAL.node = parserDollar[1].node
}
case 10:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:110
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(int),
Typex: ast.TypeInt,
Posx: parserDollar[1].token.Pos,
}
}
case 11:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:118
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(float64),
Typex: ast.TypeFloat,
Posx: parserDollar[1].token.Pos,
}
}
case 12:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:126
{
parserVAL.node = &ast.Arithmetic{
Op: parserDollar[2].token.Value.(ast.ArithmeticOp),
Exprs: []ast.Node{parserDollar[1].node, parserDollar[3].node},
Posx: parserDollar[1].node.Pos(),
}
}
case 13:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:134
{
parserVAL.node = &ast.VariableAccess{Name: parserDollar[1].token.Value.(string), Posx: parserDollar[1].token.Pos}
}
case 14:
parserDollar = parserS[parserpt-4 : parserpt+1]
//line lang.y:138
{
parserVAL.node = &ast.Call{Func: parserDollar[1].token.Value.(string), Args: parserDollar[3].nodeList, Posx: parserDollar[1].token.Pos}
}
case 15:
parserDollar = parserS[parserpt-0 : parserpt+1]
//line lang.y:143
{
parserVAL.nodeList = nil
}
case 16:
parserDollar = parserS[parserpt-3 : parserpt+1]
//line lang.y:147
{
parserVAL.nodeList = append(parserDollar[1].nodeList, parserDollar[3].node)
}
case 17:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:151
{
parserVAL.nodeList = append(parserVAL.nodeList, parserDollar[1].node)
}
case 18:
parserDollar = parserS[parserpt-1 : parserpt+1]
//line lang.y:157
{
parserVAL.node = &ast.LiteralNode{
Value: parserDollar[1].token.Value.(string),
Typex: ast.TypeString,
Posx: parserDollar[1].token.Pos,
}
}
}
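The parserDollar slice introduced in these actions is what lets each rule's $N read as parserDollar[N]: the symbol stack is re-sliced so that index 1 lands on the first symbol of the rule. A standalone sketch of that windowing, with hypothetical stack contents:

package main

import "fmt"

type symType struct{ val string }

func main() {
	// For a three-symbol rule, parserpt points at the last symbol, so
	// parserS[parserpt-3 : parserpt+1] puts $0 at index 0 and $1..$3 after it.
	parserS := []symType{{"$0"}, {"("}, {"inner"}, {")"}}
	parserpt := 3
	parserDollar := parserS[parserpt-3 : parserpt+1]
	fmt.Println(parserDollar[2].val) // "inner", i.e. $2
}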


@ -1,92 +0,0 @@
package module
import (
"fmt"
"path/filepath"
"github.com/hashicorp/terraform/helper/url"
)
// Detector defines the interface that an invalid URL or a URL with a blank
// scheme is passed through in order to determine if it's shorthand for
// something else well-known.
type Detector interface {
// Detect will detect whether the string matches a known pattern to
// turn it into a proper URL.
Detect(string, string) (string, bool, error)
}
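To make the contract concrete, a sketch of a hypothetical Detector that expands a private shorthand; the corp host and prefix are invented for illustration, not part of this package:

package module

import "strings"

// corpDetector is a hypothetical example: it turns "corp/<repo>" into a
// full Git URL on an internal host.
type corpDetector struct{}

func (d *corpDetector) Detect(src, _ string) (string, bool, error) {
	if !strings.HasPrefix(src, "corp/") {
		return "", false, nil
	}
	repo := strings.TrimPrefix(src, "corp/")
	return "git::https://git.corp.example.com/" + repo + ".git", true, nil
}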
// Detectors is the list of detectors that are tried on an invalid URL.
// This is also the order they're tried (index 0 is first).
var Detectors []Detector
func init() {
Detectors = []Detector{
new(GitHubDetector),
new(BitBucketDetector),
new(FileDetector),
}
}
// Detect turns a source string into another source string if it is
// detected to be of a known pattern.
//
// This is safe to be called with an already valid source string: Detect
// will just return it.
func Detect(src string, pwd string) (string, error) {
getForce, getSrc := getForcedGetter(src)
// Separate out the subdir if there is one, we don't pass that to detect
getSrc, subDir := getDirSubdir(getSrc)
u, err := url.Parse(getSrc)
if err == nil && u.Scheme != "" {
// Valid URL
return src, nil
}
for _, d := range Detectors {
result, ok, err := d.Detect(getSrc, pwd)
if err != nil {
return "", err
}
if !ok {
continue
}
var detectForce string
detectForce, result = getForcedGetter(result)
result, detectSubdir := getDirSubdir(result)
// If we have a subdir from the detection, then prepend it to our
// requested subdir.
if detectSubdir != "" {
if subDir != "" {
subDir = filepath.Join(detectSubdir, subDir)
} else {
subDir = detectSubdir
}
}
if subDir != "" {
u, err := url.Parse(result)
if err != nil {
return "", fmt.Errorf("Error parsing URL: %s", err)
}
u.Path += "//" + subDir
result = u.String()
}
// Preserve the forced getter if it exists. We try to use the
// original set force first, followed by any force set by the
// detector.
if getForce != "" {
result = fmt.Sprintf("%s::%s", getForce, result)
} else if detectForce != "" {
result = fmt.Sprintf("%s::%s", detectForce, result)
}
return result, nil
}
return "", fmt.Errorf("invalid source string: %s", src)
}
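This helper lives on in the extracted library; a minimal sketch of the equivalent call through go-getter, as tree.go does further down (the source string here is hypothetical):

package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-getter"
)

func main() {
	src, err := getter.Detect("github.com/hashicorp/example//subdir", "/work", getter.Detectors)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(src) // git::https://github.com/hashicorp/example.git//subdir
}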


@ -1,66 +0,0 @@
package module
import (
"encoding/json"
"fmt"
"net/http"
"net/url"
"strings"
)
// BitBucketDetector implements Detector to detect BitBucket URLs and turn
// them into URLs that the Git or Hg Getter can understand.
type BitBucketDetector struct{}
func (d *BitBucketDetector) Detect(src, _ string) (string, bool, error) {
if len(src) == 0 {
return "", false, nil
}
if strings.HasPrefix(src, "bitbucket.org/") {
return d.detectHTTP(src)
}
return "", false, nil
}
func (d *BitBucketDetector) detectHTTP(src string) (string, bool, error) {
u, err := url.Parse("https://" + src)
if err != nil {
return "", true, fmt.Errorf("error parsing BitBucket URL: %s", err)
}
// We need to get info on this BitBucket repository to determine whether
// it is Git or Hg.
var info struct {
SCM string `json:"scm"`
}
infoUrl := "https://api.bitbucket.org/1.0/repositories" + u.Path
resp, err := http.Get(infoUrl)
if err != nil {
return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
}
if resp.StatusCode == 403 {
// A private repo
return "", true, fmt.Errorf(
"shorthand BitBucket URL can't be used for private repos, " +
"please use a full URL")
}
dec := json.NewDecoder(resp.Body)
if err := dec.Decode(&info); err != nil {
return "", true, fmt.Errorf("error looking up BitBucket URL: %s", err)
}
switch info.SCM {
case "git":
if !strings.HasSuffix(u.Path, ".git") {
u.Path += ".git"
}
return "git::" + u.String(), true, nil
case "hg":
return "hg::" + u.String(), true, nil
default:
return "", true, fmt.Errorf("unknown BitBucket SCM type: %s", info.SCM)
}
}


@ -1,67 +0,0 @@
package module
import (
"net/http"
"strings"
"testing"
)
const testBBUrl = "https://bitbucket.org/hashicorp/tf-test-git"
func TestBitBucketDetector(t *testing.T) {
t.Parallel()
if _, err := http.Get(testBBUrl); err != nil {
t.Log("internet may not be working, skipping BB tests")
t.Skip()
}
cases := []struct {
Input string
Output string
}{
// HTTP
{
"bitbucket.org/hashicorp/tf-test-git",
"git::https://bitbucket.org/hashicorp/tf-test-git.git",
},
{
"bitbucket.org/hashicorp/tf-test-git.git",
"git::https://bitbucket.org/hashicorp/tf-test-git.git",
},
{
"bitbucket.org/hashicorp/tf-test-hg",
"hg::https://bitbucket.org/hashicorp/tf-test-hg",
},
}
pwd := "/pwd"
f := new(BitBucketDetector)
for i, tc := range cases {
var err error
for i := 0; i < 3; i++ {
var output string
var ok bool
output, ok, err = f.Detect(tc.Input, pwd)
if err != nil {
if strings.Contains(err.Error(), "invalid character") {
continue
}
t.Fatalf("err: %s", err)
}
if !ok {
t.Fatal("not ok")
}
if output != tc.Output {
t.Fatalf("%d: bad: %#v", i, output)
}
break
}
if i >= 3 {
t.Fatalf("failure from bitbucket: %s", err)
}
}
}


@ -1,60 +0,0 @@
package module
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// FileDetector implements Detector to detect file paths.
type FileDetector struct{}
func (d *FileDetector) Detect(src, pwd string) (string, bool, error) {
if len(src) == 0 {
return "", false, nil
}
if !filepath.IsAbs(src) {
if pwd == "" {
return "", true, fmt.Errorf(
"relative paths require a module with a pwd")
}
// Stat the pwd to determine if it's a symbolic link. If it is,
// then the pwd becomes the original directory. Otherwise,
// `filepath.Join` below does some weird stuff.
//
// We just ignore if the pwd doesn't exist. That error will be
// caught later when we try to use the URL.
if fi, err := os.Lstat(pwd); !os.IsNotExist(err) {
if err != nil {
return "", true, err
}
if fi.Mode()&os.ModeSymlink != 0 {
pwd, err = os.Readlink(pwd)
if err != nil {
return "", true, err
}
}
}
src = filepath.Join(pwd, src)
}
return fmtFileURL(src), true, nil
}
func fmtFileURL(path string) string {
if runtime.GOOS == "windows" {
// Make sure we're using "/" on Windows. URLs are "/"-based.
path = filepath.ToSlash(path)
return fmt.Sprintf("file://%s", path)
}
// Make sure that we don't start with "/" since we add that below.
if path[0] == '/' {
path = path[1:]
}
return fmt.Sprintf("file:///%s", path)
}


@ -1,88 +0,0 @@
package module
import (
"runtime"
"testing"
)
type fileTest struct {
in, pwd, out string
err bool
}
var fileTests = []fileTest{
{"./foo", "/pwd", "file:///pwd/foo", false},
{"./foo?foo=bar", "/pwd", "file:///pwd/foo?foo=bar", false},
{"foo", "/pwd", "file:///pwd/foo", false},
}
var unixFileTests = []fileTest{
{"/foo", "/pwd", "file:///foo", false},
{"/foo?bar=baz", "/pwd", "file:///foo?bar=baz", false},
}
var winFileTests = []fileTest{
{"/foo", "/pwd", "file:///pwd/foo", false},
{`C:\`, `/pwd`, `file://C:/`, false},
{`C:\?bar=baz`, `/pwd`, `file://C:/?bar=baz`, false},
}
func TestFileDetector(t *testing.T) {
if runtime.GOOS == "windows" {
fileTests = append(fileTests, winFileTests...)
} else {
fileTests = append(fileTests, unixFileTests...)
}
f := new(FileDetector)
for i, tc := range fileTests {
out, ok, err := f.Detect(tc.in, tc.pwd)
if err != nil {
t.Fatalf("err: %s", err)
}
if !ok {
t.Fatal("not ok")
}
if out != tc.out {
t.Fatalf("%d: bad: %#v", i, out)
}
}
}
var noPwdFileTests = []fileTest{
{in: "./foo", pwd: "", out: "", err: true},
{in: "foo", pwd: "", out: "", err: true},
}
var noPwdUnixFileTests = []fileTest{
{in: "/foo", pwd: "", out: "file:///foo", err: false},
}
var noPwdWinFileTests = []fileTest{
{in: "/foo", pwd: "", out: "", err: true},
{in: `C:\`, pwd: ``, out: `file://C:/`, err: false},
}
func TestFileDetector_noPwd(t *testing.T) {
if runtime.GOOS == "windows" {
noPwdFileTests = append(noPwdFileTests, noPwdWinFileTests...)
} else {
noPwdFileTests = append(noPwdFileTests, noPwdUnixFileTests...)
}
f := new(FileDetector)
for i, tc := range noPwdFileTests {
out, ok, err := f.Detect(tc.in, tc.pwd)
if err != nil != tc.err {
t.Fatalf("%d: err: %s", i, err)
}
if !ok {
t.Fatal("not ok")
}
if out != tc.out {
t.Fatalf("%d: bad: %#v", i, out)
}
}
}


@ -1,73 +0,0 @@
package module
import (
"fmt"
"net/url"
"strings"
)
// GitHubDetector implements Detector to detect GitHub URLs and turn
// them into URLs that the Git Getter can understand.
type GitHubDetector struct{}
func (d *GitHubDetector) Detect(src, _ string) (string, bool, error) {
if len(src) == 0 {
return "", false, nil
}
if strings.HasPrefix(src, "github.com/") {
return d.detectHTTP(src)
} else if strings.HasPrefix(src, "git@github.com:") {
return d.detectSSH(src)
}
return "", false, nil
}
func (d *GitHubDetector) detectHTTP(src string) (string, bool, error) {
parts := strings.Split(src, "/")
if len(parts) < 3 {
return "", false, fmt.Errorf(
"GitHub URLs should be github.com/username/repo")
}
urlStr := fmt.Sprintf("https://%s", strings.Join(parts[:3], "/"))
url, err := url.Parse(urlStr)
if err != nil {
return "", true, fmt.Errorf("error parsing GitHub URL: %s", err)
}
if !strings.HasSuffix(url.Path, ".git") {
url.Path += ".git"
}
if len(parts) > 3 {
url.Path += "//" + strings.Join(parts[3:], "/")
}
return "git::" + url.String(), true, nil
}
func (d *GitHubDetector) detectSSH(src string) (string, bool, error) {
idx := strings.Index(src, ":")
qidx := strings.Index(src, "?")
if qidx == -1 {
qidx = len(src)
}
var u url.URL
u.Scheme = "ssh"
u.User = url.User("git")
u.Host = "github.com"
u.Path = src[idx+1 : qidx]
if qidx < len(src) {
q, err := url.ParseQuery(src[qidx+1:])
if err != nil {
return "", true, fmt.Errorf("error parsing GitHub SSH URL: %s", err)
}
u.RawQuery = q.Encode()
}
return "git::" + u.String(), true, nil
}


@ -1,55 +0,0 @@
package module
import (
"testing"
)
func TestGitHubDetector(t *testing.T) {
cases := []struct {
Input string
Output string
}{
// HTTP
{"github.com/hashicorp/foo", "git::https://github.com/hashicorp/foo.git"},
{"github.com/hashicorp/foo.git", "git::https://github.com/hashicorp/foo.git"},
{
"github.com/hashicorp/foo/bar",
"git::https://github.com/hashicorp/foo.git//bar",
},
{
"github.com/hashicorp/foo?foo=bar",
"git::https://github.com/hashicorp/foo.git?foo=bar",
},
{
"github.com/hashicorp/foo.git?foo=bar",
"git::https://github.com/hashicorp/foo.git?foo=bar",
},
// SSH
{"git@github.com:hashicorp/foo.git", "git::ssh://git@github.com/hashicorp/foo.git"},
{
"git@github.com:hashicorp/foo.git//bar",
"git::ssh://git@github.com/hashicorp/foo.git//bar",
},
{
"git@github.com:hashicorp/foo.git?foo=bar",
"git::ssh://git@github.com/hashicorp/foo.git?foo=bar",
},
}
pwd := "/pwd"
f := new(GitHubDetector)
for i, tc := range cases {
output, ok, err := f.Detect(tc.Input, pwd)
if err != nil {
t.Fatalf("err: %s", err)
}
if !ok {
t.Fatal("not ok")
}
if output != tc.Output {
t.Fatalf("%d: bad: %#v", i, output)
}
}
}


@ -1,51 +0,0 @@
package module
import (
"testing"
)
func TestDetect(t *testing.T) {
cases := []struct {
Input string
Pwd string
Output string
Err bool
}{
{"./foo", "/foo", "file:///foo/foo", false},
{"git::./foo", "/foo", "git::file:///foo/foo", false},
{
"git::github.com/hashicorp/foo",
"",
"git::https://github.com/hashicorp/foo.git",
false,
},
{
"./foo//bar",
"/foo",
"file:///foo/foo//bar",
false,
},
{
"git::github.com/hashicorp/foo//bar",
"",
"git::https://github.com/hashicorp/foo.git//bar",
false,
},
{
"git::https://github.com/hashicorp/consul.git",
"",
"git::https://github.com/hashicorp/consul.git",
false,
},
}
for i, tc := range cases {
output, err := Detect(tc.Input, tc.Pwd)
if err != nil != tc.Err {
t.Fatalf("%d: bad err: %s", i, err)
}
if output != tc.Output {
t.Fatalf("%d: bad output: %s\nexpected: %s", i, output, tc.Output)
}
}
}


@ -1,65 +0,0 @@
package module
import (
"crypto/md5"
"encoding/hex"
"fmt"
"os"
"path/filepath"
)
// FolderStorage is an implementation of the Storage interface that manages
// modules on the disk.
type FolderStorage struct {
// StorageDir is the directory where the modules will be stored.
StorageDir string
}
// Dir implements Storage.Dir
func (s *FolderStorage) Dir(key string) (d string, e bool, err error) {
d = s.dir(key)
_, err = os.Stat(d)
if err == nil {
// Directory exists
e = true
return
}
if os.IsNotExist(err) {
// Directory doesn't exist
d = ""
e = false
err = nil
return
}
// An error
d = ""
e = false
return
}
// Get implements Storage.Get
func (s *FolderStorage) Get(key string, source string, update bool) error {
dir := s.dir(key)
if !update {
if _, err := os.Stat(dir); err == nil {
// If the directory already exists, then we're done since
// we're not updating.
return nil
} else if !os.IsNotExist(err) {
// If the error we got wasn't a file-not-exist error, then
// something went wrong and we should report it.
return fmt.Errorf("Error reading module directory: %s", err)
}
}
// Get the source. This always forces an update.
return Get(dir, source)
}
// dir returns the directory name that we'll use internally to map the
// given key to a path on disk.
func (s *FolderStorage) dir(key string) string {
sum := md5.Sum([]byte(key))
return filepath.Join(s.StorageDir, hex.EncodeToString(sum[:]))
}
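A small sketch of the resulting layout; the storage dir and key are hypothetical:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"path/filepath"
)

func main() {
	// FolderStorage maps a module key to an md5-named directory.
	key := "root.child;file:///work/child"
	sum := md5.Sum([]byte(key))
	fmt.Println(filepath.Join("/tmp/modules", hex.EncodeToString(sum[:])))
}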


@ -1,48 +0,0 @@
package module
import (
"os"
"path/filepath"
"testing"
)
func TestFolderStorage_impl(t *testing.T) {
var _ Storage = new(FolderStorage)
}
func TestFolderStorage(t *testing.T) {
s := &FolderStorage{StorageDir: tempDir(t)}
module := testModule("basic")
// A module shouldn't exist at first...
_, ok, err := s.Dir(module)
if err != nil {
t.Fatalf("err: %s", err)
}
if ok {
t.Fatal("should not exist")
}
key := "foo"
// We can get it
err = s.Get(key, module, false)
if err != nil {
t.Fatalf("err: %s", err)
}
// Now the module exists
dir, ok, err := s.Dir(key)
if err != nil {
t.Fatalf("err: %s", err)
}
if !ok {
t.Fatal("should exist")
}
mainPath := filepath.Join(dir, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}


@ -1,113 +1,30 @@
package module
import (
"io/ioutil"
"os"
"github.com/hashicorp/go-getter"
)
// GetMode is an enum that describes how modules are loaded.
//
// GetModeLoad says that modules will not be downloaded or updated, they will
// only be loaded from the storage.
//
// GetModeGet says that modules can be initially downloaded if they don't
// exist, but otherwise to just load from the current version in storage.
//
// GetModeUpdate says that modules should be checked for updates and
// downloaded prior to loading. If there are no updates, we load the version
// from disk, otherwise we download first and then load.
type GetMode byte
const (
GetModeNone GetMode = iota
GetModeGet
GetModeUpdate
)
// Getter defines the interface that schemes must implement to download
// and update modules.
type Getter interface {
// Get downloads the given URL into the given directory. This always
// assumes that we're updating and gets the latest version that it can.
//
// The directory may already exist (if we're updating). If it is in a
// format that isn't understood, an error should be returned. Get shouldn't
// simply nuke the directory.
Get(string, *url.URL) error
}
// Getters is the mapping of scheme to the Getter implementation that will
// be used to get a dependency.
var Getters map[string]Getter
// forcedRegexp is the regular expression that finds forced getters. This
// syntax is schema::url, example: git::https://foo.com
var forcedRegexp = regexp.MustCompile(`^([A-Za-z]+)::(.+)$`)
func init() {
httpGetter := new(HttpGetter)
Getters = map[string]Getter{
"file": new(FileGetter),
"git": new(GitGetter),
"hg": new(HgGetter),
"http": httpGetter,
"https": httpGetter,
}
}
// Get downloads the module specified by src into the folder specified by
// dst. If dst already exists, Get will attempt to update it.
//
// src is a URL, whereas dst is always just a file path to a folder. This
// folder doesn't need to exist. It will be created if it doesn't exist.
func Get(dst, src string) error {
var force string
force, src = getForcedGetter(src)
// If there is a subdir component, then we download the root separately
// and then copy over the proper subdir.
var realDst string
src, subDir := getDirSubdir(src)
if subDir != "" {
tmpDir, err := ioutil.TempDir("", "tf")
if err != nil {
return err
}
if err := os.RemoveAll(tmpDir); err != nil {
return err
}
defer os.RemoveAll(tmpDir)
realDst = dst
dst = tmpDir
}
u, err := urlhelper.Parse(src)
if err != nil {
return err
}
if force == "" {
force = u.Scheme
}
g, ok := Getters[force]
if !ok {
return fmt.Errorf(
"module download not supported for scheme '%s'", force)
}
err = g.Get(dst, u)
if err != nil {
err = fmt.Errorf("error downloading module '%s': %s", src, err)
return err
}
// If we have a subdir, copy that over
if subDir != "" {
if err := os.RemoveAll(realDst); err != nil {
return err
}
if err := os.MkdirAll(realDst, 0755); err != nil {
return err
}
return copyDir(realDst, filepath.Join(dst, subDir))
}
return nil
}
// GetCopy is the same as Get except that it downloads a copy of the
// module represented by source.
@ -126,7 +43,7 @@ func GetCopy(dst, src string) error {
defer os.RemoveAll(tmpDir)
// Get to that temporary dir
if err := getter.Get(tmpDir, src); err != nil {
return err
}
@ -139,69 +56,14 @@ func GetCopy(dst, src string) error {
return copyDir(dst, tmpDir)
}
func getStorage(s getter.Storage, key string, src string, mode GetMode) (string, bool, error) {
// Get the module with the level specified if we were told to.
if mode > GetModeNone {
if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
return "", false, err
}
}
// Get the directory where the module is.
return s.Dir(key)
}
// getRunCommand is a helper that will run a command and capture the output
// in the case an error happens.
func getRunCommand(cmd *exec.Cmd) error {
var buf bytes.Buffer
cmd.Stdout = &buf
cmd.Stderr = &buf
err := cmd.Run()
if err == nil {
return nil
}
if exiterr, ok := err.(*exec.ExitError); ok {
// The program has exited with an exit code != 0
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
return fmt.Errorf(
"%s exited with %d: %s",
cmd.Path,
status.ExitStatus(),
buf.String())
}
}
return fmt.Errorf("error running %s: %s", cmd.Path, buf.String())
}
// getDirSubdir takes a source and returns a tuple of the URL without
// the subdir and the URL with the subdir.
func getDirSubdir(src string) (string, string) {
// Calculate an offset to avoid accidentally marking the scheme
// as the dir.
var offset int
if idx := strings.Index(src, "://"); idx > -1 {
offset = idx + 3
}
// First see if we even have an explicit subdir
idx := strings.Index(src[offset:], "//")
if idx == -1 {
return src, ""
}
idx += offset
subdir := src[idx+2:]
src = src[:idx]
// Next, check if we have query parameters and push them onto the
// URL.
if idx = strings.Index(subdir, "?"); idx > -1 {
query := subdir[idx:]
subdir = subdir[:idx]
src += query
}
return src, subdir
}
// getForcedGetter takes a source and returns the tuple of the forced
// getter and the raw URL (without the force syntax).
func getForcedGetter(src string) (string, string) {
var forced string
if ms := forcedRegexp.FindStringSubmatch(src); ms != nil {
forced = ms[1]
src = ms[2]
}
return forced, src
}
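A sketch of how a caller drives the storage flow above through the extracted go-getter types; the key, source, and storage dir are hypothetical:

package main

import (
	"log"

	"github.com/hashicorp/go-getter"
)

func main() {
	s := &getter.FolderStorage{StorageDir: ".terraform/modules"}
	// Download the module if missing (update=false), then resolve its dir.
	if err := s.Get("root.example", "github.com/hashicorp/example", false); err != nil {
		log.Fatal(err)
	}
	dir, found, err := s.Dir("root.example")
	if err != nil || !found {
		log.Fatalf("module not in storage: %v", err)
	}
	log.Println("module at", dir)
}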


@ -1,46 +0,0 @@
package module
import (
"fmt"
"net/url"
"os"
"path/filepath"
)
// FileGetter is a Getter implementation that will download a module from
// a file scheme.
type FileGetter struct{}
func (g *FileGetter) Get(dst string, u *url.URL) error {
// The source path must exist and be a directory to be usable.
if fi, err := os.Stat(u.Path); err != nil {
return fmt.Errorf("source path error: %s", err)
} else if !fi.IsDir() {
return fmt.Errorf("source path must be a directory")
}
fi, err := os.Lstat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
// If the destination already exists, it must be a symlink
if err == nil {
mode := fi.Mode()
if mode&os.ModeSymlink == 0 {
return fmt.Errorf("destination exists and is not a symlink")
}
// Remove the destination
if err := os.Remove(dst); err != nil {
return err
}
}
// Create all the parent directories
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
return err
}
return os.Symlink(u.Path, dst)
}


@ -1,104 +0,0 @@
package module
import (
"os"
"path/filepath"
"testing"
)
func TestFileGetter_impl(t *testing.T) {
var _ Getter = new(FileGetter)
}
func TestFileGetter(t *testing.T) {
g := new(FileGetter)
dst := tempDir(t)
// With a dir that doesn't exist
if err := g.Get(dst, testModuleURL("basic")); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the destination folder is a symlink
fi, err := os.Lstat(dst)
if err != nil {
t.Fatalf("err: %s", err)
}
if fi.Mode()&os.ModeSymlink == 0 {
t.Fatal("destination is not a symlink")
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestFileGetter_sourceFile(t *testing.T) {
g := new(FileGetter)
dst := tempDir(t)
// With a source URL that is a path to a file
u := testModuleURL("basic")
u.Path += "/main.tf"
if err := g.Get(dst, u); err == nil {
t.Fatal("should error")
}
}
func TestFileGetter_sourceNoExist(t *testing.T) {
g := new(FileGetter)
dst := tempDir(t)
// With a source URL that doesn't exist
u := testModuleURL("basic")
u.Path += "/main"
if err := g.Get(dst, u); err == nil {
t.Fatal("should error")
}
}
func TestFileGetter_dir(t *testing.T) {
g := new(FileGetter)
dst := tempDir(t)
if err := os.MkdirAll(dst, 0755); err != nil {
t.Fatalf("err: %s", err)
}
// With a dir that exists that isn't a symlink
if err := g.Get(dst, testModuleURL("basic")); err == nil {
t.Fatal("should error")
}
}
func TestFileGetter_dirSymlink(t *testing.T) {
g := new(FileGetter)
dst := tempDir(t)
dst2 := tempDir(t)
// Make parents
if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
t.Fatalf("err: %s", err)
}
if err := os.MkdirAll(dst2, 0755); err != nil {
t.Fatalf("err: %s", err)
}
// Make a symlink
if err := os.Symlink(dst2, dst); err != nil {
t.Fatalf("err: %s", err)
}
// With a dir that exists that isn't a symlink
if err := g.Get(dst, testModuleURL("basic")); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}


@ -1,74 +0,0 @@
package module
import (
"fmt"
"net/url"
"os"
"os/exec"
)
// GitGetter is a Getter implementation that will download a module from
// a git repository.
type GitGetter struct{}
func (g *GitGetter) Get(dst string, u *url.URL) error {
if _, err := exec.LookPath("git"); err != nil {
return fmt.Errorf("git must be available and on the PATH")
}
// Extract some query parameters we use
var ref string
q := u.Query()
if len(q) > 0 {
ref = q.Get("ref")
q.Del("ref")
// Copy the URL
var newU url.URL = *u
u = &newU
u.RawQuery = q.Encode()
}
// First: clone or update the repository
_, err := os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
err = g.update(dst, u)
} else {
err = g.clone(dst, u)
}
if err != nil {
return err
}
// Next: check out the proper tag/branch if it is specified, and checkout
if ref == "" {
return nil
}
return g.checkout(dst, ref)
}
func (g *GitGetter) checkout(dst string, ref string) error {
cmd := exec.Command("git", "checkout", ref)
cmd.Dir = dst
return getRunCommand(cmd)
}
func (g *GitGetter) clone(dst string, u *url.URL) error {
cmd := exec.Command("git", "clone", u.String(), dst)
return getRunCommand(cmd)
}
func (g *GitGetter) update(dst string, u *url.URL) error {
// We have to be on a branch to pull
if err := g.checkout(dst, "master"); err != nil {
return err
}
cmd := exec.Command("git", "pull", "--ff-only")
cmd.Dir = dst
return getRunCommand(cmd)
}


@ -1,143 +0,0 @@
package module
import (
"os"
"os/exec"
"path/filepath"
"testing"
)
var testHasGit bool
func init() {
if _, err := exec.LookPath("git"); err == nil {
testHasGit = true
}
}
func TestGitGetter_impl(t *testing.T) {
var _ Getter = new(GitGetter)
}
func TestGitGetter(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
// Git doesn't allow nested ".git" directories so we do some hackiness
// here to get around that...
moduleDir := filepath.Join(fixtureDir, "basic-git")
oldName := filepath.Join(moduleDir, "DOTgit")
newName := filepath.Join(moduleDir, ".git")
if err := os.Rename(oldName, newName); err != nil {
t.Fatalf("err: %s", err)
}
defer os.Rename(newName, oldName)
// With a dir that doesn't exist
if err := g.Get(dst, testModuleURL("basic-git")); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_branch(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
// Git doesn't allow nested ".git" directories so we do some hackiness
// here to get around that...
moduleDir := filepath.Join(fixtureDir, "basic-git")
oldName := filepath.Join(moduleDir, "DOTgit")
newName := filepath.Join(moduleDir, ".git")
if err := os.Rename(oldName, newName); err != nil {
t.Fatalf("err: %s", err)
}
defer os.Rename(newName, oldName)
url := testModuleURL("basic-git")
q := url.Query()
q.Add("ref", "test-branch")
url.RawQuery = q.Encode()
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main_branch.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Get again should work
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "main_branch.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGitGetter_tag(t *testing.T) {
if !testHasGit {
t.Log("git not found, skipping")
t.Skip()
}
g := new(GitGetter)
dst := tempDir(t)
// Git doesn't allow nested ".git" directories so we do some hackiness
// here to get around that...
moduleDir := filepath.Join(fixtureDir, "basic-git")
oldName := filepath.Join(moduleDir, "DOTgit")
newName := filepath.Join(moduleDir, ".git")
if err := os.Rename(oldName, newName); err != nil {
t.Fatalf("err: %s", err)
}
defer os.Rename(newName, oldName)
url := testModuleURL("basic-git")
q := url.Query()
q.Add("ref", "v1.0")
url.RawQuery = q.Encode()
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main_tag1.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Get again should work
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "main_tag1.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}


@ -1,89 +0,0 @@
package module
import (
"fmt"
"net/url"
"os"
"os/exec"
"runtime"
urlhelper "github.com/hashicorp/terraform/helper/url"
)
// HgGetter is a Getter implementation that will download a module from
// a Mercurial repository.
type HgGetter struct{}
func (g *HgGetter) Get(dst string, u *url.URL) error {
if _, err := exec.LookPath("hg"); err != nil {
return fmt.Errorf("hg must be available and on the PATH")
}
newURL, err := urlhelper.Parse(u.String())
if err != nil {
return err
}
if fixWindowsDrivePath(newURL) {
// See valid file path form on http://www.selenic.com/hg/help/urls
newURL.Path = fmt.Sprintf("/%s", newURL.Path)
}
// Extract some query parameters we use
var rev string
q := newURL.Query()
if len(q) > 0 {
rev = q.Get("rev")
q.Del("rev")
newURL.RawQuery = q.Encode()
}
_, err = os.Stat(dst)
if err != nil && !os.IsNotExist(err) {
return err
}
if err != nil {
if err := g.clone(dst, newURL); err != nil {
return err
}
}
if err := g.pull(dst, newURL); err != nil {
return err
}
return g.update(dst, newURL, rev)
}
func (g *HgGetter) clone(dst string, u *url.URL) error {
cmd := exec.Command("hg", "clone", "-U", u.String(), dst)
return getRunCommand(cmd)
}
func (g *HgGetter) pull(dst string, u *url.URL) error {
cmd := exec.Command("hg", "pull")
cmd.Dir = dst
return getRunCommand(cmd)
}
func (g *HgGetter) update(dst string, u *url.URL, rev string) error {
args := []string{"update"}
if rev != "" {
args = append(args, rev)
}
cmd := exec.Command("hg", args...)
cmd.Dir = dst
return getRunCommand(cmd)
}
func fixWindowsDrivePath(u *url.URL) bool {
// hg assumes a file:/// prefix for Windows drive letter file paths.
// (e.g. file:///c:/foo/bar)
// If the URL Path does not begin with a '/' character, the resulting URL
// path will have a file:// prefix. (e.g. file://c:/foo/bar)
// See http://www.selenic.com/hg/help/urls and the examples listed in
// http://selenic.com/repo/hg-stable/file/1265a3a71d75/mercurial/util.py#l1936
return runtime.GOOS == "windows" && u.Scheme == "file" &&
len(u.Path) > 1 && u.Path[0] != '/' && u.Path[1] == ':'
}
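A minimal sketch of the URL shape this guards against, with a hypothetical path:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	// Without the leading slash, the URL renders as file://c:/foo/bar,
	// which hg rejects; the fix above produces file:///c:/foo/bar.
	u := url.URL{Scheme: "file", Path: "c:/foo/bar"}
	u.Path = fmt.Sprintf("/%s", u.Path)
	fmt.Println(u.String()) // file:///c:/foo/bar
}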


@ -1,81 +0,0 @@
package module
import (
"os"
"os/exec"
"path/filepath"
"testing"
)
var testHasHg bool
func init() {
if _, err := exec.LookPath("hg"); err == nil {
testHasHg = true
}
}
func TestHgGetter_impl(t *testing.T) {
var _ Getter = new(HgGetter)
}
func TestHgGetter(t *testing.T) {
t.Parallel()
if !testHasHg {
t.Log("hg not found, skipping")
t.Skip()
}
g := new(HgGetter)
dst := tempDir(t)
// With a dir that doesn't exist
if err := g.Get(dst, testModuleURL("basic-hg")); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestHgGetter_branch(t *testing.T) {
t.Parallel()
if !testHasHg {
t.Log("hg not found, skipping")
t.Skip()
}
g := new(HgGetter)
dst := tempDir(t)
url := testModuleURL("basic-hg")
q := url.Query()
q.Add("rev", "test-branch")
url.RawQuery = q.Encode()
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main_branch.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
// Get again should work
if err := g.Get(dst, url); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath = filepath.Join(dst, "main_branch.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}


@ -1,173 +0,0 @@
package module
import (
"encoding/xml"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
)
// HttpGetter is a Getter implementation that will download a module from
// an HTTP endpoint. The protocol for downloading a module from an HTTP
// endpoint is as follows:
//
// An HTTP GET request is made to the URL with the additional GET parameter
// "terraform-get=1". This lets you handle that scenario specially if you
// wish. The response must be a 2xx.
//
// First, a header is looked for "X-Terraform-Get" which should contain
// a source URL to download.
//
// If the header is not present, then a meta tag is searched for named
// "terraform-get" and the content should be a source URL.
//
// The source URL, whether from the header or meta tag, must be a fully
// formed URL. The shorthand syntax of "github.com/foo/bar" or relative
// paths are not allowed.
type HttpGetter struct{}
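A minimal sketch of the server side of that handshake; the listen address and module URL are hypothetical:

package main

import (
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/module", func(w http.ResponseWriter, r *http.Request) {
		// Answer the terraform-get probe with a fully formed source URL.
		if r.URL.Query().Get("terraform-get") == "1" {
			w.Header().Set("X-Terraform-Get", "git::https://github.com/hashicorp/example.git")
		}
		w.WriteHeader(http.StatusOK)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}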
func (g *HttpGetter) Get(dst string, u *url.URL) error {
// Copy the URL so we can modify it
var newU url.URL = *u
u = &newU
// Add terraform-get to the parameter.
q := u.Query()
q.Add("terraform-get", "1")
u.RawQuery = q.Encode()
// Get the URL
resp, err := http.Get(u.String())
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
return fmt.Errorf("bad response code: %d", resp.StatusCode)
}
// Extract the source URL
var source string
if v := resp.Header.Get("X-Terraform-Get"); v != "" {
source = v
} else {
source, err = g.parseMeta(resp.Body)
if err != nil {
return err
}
}
if source == "" {
return fmt.Errorf("no source URL was returned")
}
// If there is a subdir component, then we download the root separately
// into a temporary directory, then copy over the proper subdir.
source, subDir := getDirSubdir(source)
if subDir == "" {
return Get(dst, source)
}
// We have a subdir, time to jump some hoops
return g.getSubdir(dst, source, subDir)
}
// getSubdir downloads the source into the destination, but with
// the proper subdir.
func (g *HttpGetter) getSubdir(dst, source, subDir string) error {
// Create a temporary directory to store the full source
td, err := ioutil.TempDir("", "tf")
if err != nil {
return err
}
defer os.RemoveAll(td)
// Download that into the given directory
if err := Get(td, source); err != nil {
return err
}
// Make sure the subdir path actually exists
sourcePath := filepath.Join(td, subDir)
if _, err := os.Stat(sourcePath); err != nil {
return fmt.Errorf(
"Error downloading %s: %s", source, err)
}
// Copy the subdirectory into our actual destination.
if err := os.RemoveAll(dst); err != nil {
return err
}
// Make the final destination
if err := os.MkdirAll(dst, 0755); err != nil {
return err
}
return copyDir(dst, sourcePath)
}
// parseMeta looks for the first meta tag in the given reader that
// will give us the source URL.
func (g *HttpGetter) parseMeta(r io.Reader) (string, error) {
d := xml.NewDecoder(r)
d.CharsetReader = charsetReader
d.Strict = false
var err error
var t xml.Token
for {
t, err = d.Token()
if err != nil {
if err == io.EOF {
err = nil
}
return "", err
}
if e, ok := t.(xml.StartElement); ok && strings.EqualFold(e.Name.Local, "body") {
return "", nil
}
if e, ok := t.(xml.EndElement); ok && strings.EqualFold(e.Name.Local, "head") {
return "", nil
}
e, ok := t.(xml.StartElement)
if !ok || !strings.EqualFold(e.Name.Local, "meta") {
continue
}
if attrValue(e.Attr, "name") != "terraform-get" {
continue
}
if f := attrValue(e.Attr, "content"); f != "" {
return f, nil
}
}
}
// attrValue returns the attribute value for the case-insensitive key
// `name', or the empty string if nothing is found.
func attrValue(attrs []xml.Attr, name string) string {
for _, a := range attrs {
if strings.EqualFold(a.Name.Local, name) {
return a.Value
}
}
return ""
}
// charsetReader returns a reader for the given charset. Currently
// it only supports UTF-8 and ASCII. Otherwise, it returns a meaningful
// error which is printed by go get, so the user can find why the package
// wasn't downloaded if the encoding is not supported. Note that, in
// order to reduce potential errors, ASCII is treated as UTF-8 (i.e. characters
// greater than 0x7f are not rejected).
func charsetReader(charset string, input io.Reader) (io.Reader, error) {
switch strings.ToLower(charset) {
case "ascii":
return input, nil
default:
return nil, fmt.Errorf("can't decode XML document using charset %q", charset)
}
}


@ -1,155 +0,0 @@
package module
import (
"fmt"
"net"
"net/http"
"net/url"
"os"
"path/filepath"
"testing"
)
func TestHttpGetter_impl(t *testing.T) {
var _ Getter = new(HttpGetter)
}
func TestHttpGetter_header(t *testing.T) {
ln := testHttpServer(t)
defer ln.Close()
g := new(HttpGetter)
dst := tempDir(t)
var u url.URL
u.Scheme = "http"
u.Host = ln.Addr().String()
u.Path = "/header"
// Get it!
if err := g.Get(dst, &u); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestHttpGetter_meta(t *testing.T) {
ln := testHttpServer(t)
defer ln.Close()
g := new(HttpGetter)
dst := tempDir(t)
var u url.URL
u.Scheme = "http"
u.Host = ln.Addr().String()
u.Path = "/meta"
// Get it!
if err := g.Get(dst, &u); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestHttpGetter_metaSubdir(t *testing.T) {
ln := testHttpServer(t)
defer ln.Close()
g := new(HttpGetter)
dst := tempDir(t)
var u url.URL
u.Scheme = "http"
u.Host = ln.Addr().String()
u.Path = "/meta-subdir"
// Get it!
if err := g.Get(dst, &u); err != nil {
t.Fatalf("err: %s", err)
}
// Verify the main file exists
mainPath := filepath.Join(dst, "sub.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestHttpGetter_none(t *testing.T) {
ln := testHttpServer(t)
defer ln.Close()
g := new(HttpGetter)
dst := tempDir(t)
var u url.URL
u.Scheme = "http"
u.Host = ln.Addr().String()
u.Path = "/none"
// Get it!
if err := g.Get(dst, &u); err == nil {
t.Fatal("should error")
}
}
func testHttpServer(t *testing.T) net.Listener {
ln, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
t.Fatalf("err: %s", err)
}
mux := http.NewServeMux()
mux.HandleFunc("/header", testHttpHandlerHeader)
mux.HandleFunc("/meta", testHttpHandlerMeta)
mux.HandleFunc("/meta-subdir", testHttpHandlerMetaSubdir)
var server http.Server
server.Handler = mux
go server.Serve(ln)
return ln
}
func testHttpHandlerHeader(w http.ResponseWriter, r *http.Request) {
w.Header().Add("X-Terraform-Get", testModuleURL("basic").String())
w.WriteHeader(200)
}
func testHttpHandlerMeta(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic").String())))
}
func testHttpHandlerMetaSubdir(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(fmt.Sprintf(testHttpMetaStr, testModuleURL("basic//subdir").String())))
}
func testHttpHandlerNone(w http.ResponseWriter, r *http.Request) {
w.Write([]byte(testHttpNoneStr))
}
const testHttpMetaStr = `
<html>
<head>
<meta name="terraform-get" content="%s">
</head>
</html>
`
const testHttpNoneStr = `
<html>
<head>
</head>
</html>
`


@ -1,128 +0,0 @@
package module
import (
"os"
"path/filepath"
"strings"
"testing"
)
func TestGet_badSchema(t *testing.T) {
dst := tempDir(t)
u := testModule("basic")
u = strings.Replace(u, "file", "nope", -1)
if err := Get(dst, u); err == nil {
t.Fatal("should error")
}
}
func TestGet_file(t *testing.T) {
dst := tempDir(t)
u := testModule("basic")
if err := Get(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGet_fileForced(t *testing.T) {
dst := tempDir(t)
u := testModule("basic")
u = "file::" + u
if err := Get(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGet_fileSubdir(t *testing.T) {
dst := tempDir(t)
u := testModule("basic//subdir")
if err := Get(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
mainPath := filepath.Join(dst, "sub.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGetCopy_dot(t *testing.T) {
dst := tempDir(t)
u := testModule("basic-dot")
if err := GetCopy(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
mainPath = filepath.Join(dst, "foo.tf")
if _, err := os.Stat(mainPath); err == nil {
t.Fatal("should not have foo.tf")
}
}
func TestGetCopy_file(t *testing.T) {
dst := tempDir(t)
u := testModule("basic")
if err := GetCopy(dst, u); err != nil {
t.Fatalf("err: %s", err)
}
mainPath := filepath.Join(dst, "main.tf")
if _, err := os.Stat(mainPath); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestGetDirSubdir(t *testing.T) {
cases := []struct {
Input string
Dir, Sub string
}{
{
"hashicorp.com",
"hashicorp.com", "",
},
{
"hashicorp.com//foo",
"hashicorp.com", "foo",
},
{
"hashicorp.com//foo?bar=baz",
"hashicorp.com?bar=baz", "foo",
},
{
"file://foo//bar",
"file://foo", "bar",
},
}
for i, tc := range cases {
adir, asub := getDirSubdir(tc.Input)
if adir != tc.Dir {
t.Fatalf("%d: bad dir: %#v", i, adir)
}
if asub != tc.Sub {
t.Fatalf("%d: bad sub: %#v", i, asub)
}
}
}


@ -2,13 +2,12 @@ package module
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config"
)
const fixtureDir = "./test-fixtures"
@ -34,24 +33,6 @@ func testConfig(t *testing.T, n string) *config.Config {
return c
}
func testStorage(t *testing.T) getter.Storage {
return &getter.FolderStorage{StorageDir: tempDir(t)}
}
func testModule(n string) string {
p := filepath.Join(fixtureDir, n)
p, err := filepath.Abs(p)
if err != nil {
panic(err)
}
return fmtFileURL(p)
}
func testModuleURL(n string) *url.URL {
u, err := urlhelper.Parse(testModule(n))
if err != nil {
panic(err)
}
return u
}


@ -1,25 +0,0 @@
package module
// Storage is an interface that knows how to lookup downloaded modules
// as well as download and update modules from their sources into the
// proper location.
type Storage interface {
// Dir returns the directory on local disk where the module source
// can be loaded from.
Dir(string) (string, bool, error)
// Get will download and optionally update the given module.
Get(string, string, bool) error
}
func getStorage(s Storage, key string, src string, mode GetMode) (string, bool, error) {
// Get the module with the level specified if we were told to.
if mode > GetModeNone {
if err := s.Get(key, src, mode == GetModeUpdate); err != nil {
return "", false, err
}
}
// Get the directory where the module is.
return s.Dir(key)
}


@ -8,6 +8,7 @@ import (
"strings" "strings"
"sync" "sync"
"github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config"
) )
@ -27,25 +28,6 @@ type Tree struct {
lock sync.RWMutex
}
// GetMode is an enum that describes how modules are loaded.
//
// GetModeLoad says that modules will not be downloaded or updated, they will
// only be loaded from the storage.
//
// GetModeGet says that modules can be initially downloaded if they don't
// exist, but otherwise to just load from the current version in storage.
//
// GetModeUpdate says that modules should be checked for updates and
// downloaded prior to loading. If there are no updates, we load the version
// from disk, otherwise we download first and then load.
type GetMode byte
const (
GetModeNone GetMode = iota
GetModeGet
GetModeUpdate
)
// NewTree returns a new Tree for the given config structure.
func NewTree(name string, c *config.Config) *Tree {
return &Tree{config: c, name: name}
@ -136,7 +118,7 @@ func (t *Tree) Name() string {
// module trees inherently require the configuration to be in a reasonably
// sane state: no circular dependencies, proper module sources, etc. A full
// suite of validations can be done by running Validate (after loading).
func (t *Tree) Load(s getter.Storage, mode GetMode) error {
t.lock.Lock()
defer t.lock.Unlock()
@ -159,15 +141,15 @@ func (t *Tree) Load(s Storage, mode GetMode) error {
path = append(path, m.Name)
// Split out the subdir if we have one
source, subDir := getter.SourceDirSubdir(m.Source)
source, err := getter.Detect(source, t.config.Dir, getter.Detectors)
if err != nil {
return fmt.Errorf("module %s: %s", m.Name, err)
}
// Check if the detector introduced something new.
source, subDir2 := getter.SourceDirSubdir(source)
if subDir2 != "" {
subDir = filepath.Join(subDir2, subDir)
}
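With the signature change, callers hand Load a getter.Storage directly; a sketch under hypothetical paths:

package main

import (
	"log"

	"github.com/hashicorp/go-getter"
	"github.com/hashicorp/terraform/config"
	"github.com/hashicorp/terraform/config/module"
)

func main() {
	conf, err := config.LoadDir("./example")
	if err != nil {
		log.Fatal(err)
	}
	tree := module.NewTree("", conf)
	s := &getter.FolderStorage{StorageDir: ".terraform/modules"}
	if err := tree.Load(s, module.GetModeGet); err != nil {
		log.Fatal(err)
	}
}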


@ -11,8 +11,8 @@ import (
type Graph struct {
vertices *Set
edges *Set
downEdges map[interface{}]*Set
upEdges map[interface{}]*Set
once sync.Once
}
@ -110,10 +110,10 @@ func (g *Graph) RemoveEdge(edge Edge) {
g.edges.Delete(edge)
// Delete the up/down edges
if s, ok := g.downEdges[hashcode(edge.Source())]; ok {
s.Delete(edge.Target())
}
if s, ok := g.upEdges[hashcode(edge.Target())]; ok {
s.Delete(edge.Source())
}
}
@ -121,13 +121,13 @@ func (g *Graph) RemoveEdge(edge Edge) {
// DownEdges returns the outward edges from the source Vertex v.
func (g *Graph) DownEdges(v Vertex) *Set {
g.once.Do(g.init)
return g.downEdges[hashcode(v)]
}
// UpEdges returns the inward edges to the destination Vertex v.
func (g *Graph) UpEdges(v Vertex) *Set {
g.once.Do(g.init)
return g.upEdges[hashcode(v)]
}
// Connect adds an edge with the given source and target. This is safe to
@ -139,9 +139,11 @@ func (g *Graph) Connect(edge Edge) {
source := edge.Source()
target := edge.Target()
sourceCode := hashcode(source)
targetCode := hashcode(target)
// Do we have this already? If so, don't add it again.
if s, ok := g.downEdges[sourceCode]; ok && s.Include(target) {
return
}
@ -149,18 +151,18 @@ func (g *Graph) Connect(edge Edge) {
g.edges.Add(edge)
// Add the down edge
s, ok := g.downEdges[sourceCode]
if !ok {
s = new(Set)
g.downEdges[sourceCode] = s
}
s.Add(target)
// Add the up edge
s, ok = g.upEdges[targetCode]
if !ok {
s = new(Set)
g.upEdges[targetCode] = s
}
s.Add(source)
}
@ -184,7 +186,7 @@ func (g *Graph) String() string {
// Write each node in order... // Write each node in order...
for _, name := range names { for _, name := range names {
v := mapping[name] v := mapping[name]
targets := g.downEdges[v] targets := g.downEdges[hashcode(v)]
buf.WriteString(fmt.Sprintf("%s\n", name)) buf.WriteString(fmt.Sprintf("%s\n", name))
@ -207,8 +209,8 @@ func (g *Graph) String() string {
func (g *Graph) init() { func (g *Graph) init() {
g.vertices = new(Set) g.vertices = new(Set)
g.edges = new(Set) g.edges = new(Set)
g.downEdges = make(map[Vertex]*Set) g.downEdges = make(map[interface{}]*Set)
g.upEdges = make(map[Vertex]*Set) g.upEdges = make(map[interface{}]*Set)
} }
// VertexName returns the name of a vertex. // VertexName returns the name of a vertex.

View File

@ -1,6 +1,7 @@
package dag

import (
+   "fmt"
    "strings"
    "testing"
)
@ -79,6 +80,36 @@ func TestGraph_replaceSelf(t *testing.T) {
    }
}

+// This tests that connecting edges works based on custom Hashcode
+// implementations for uniqueness.
+func TestGraph_hashcode(t *testing.T) {
+   var g Graph
+   g.Add(&hashVertex{code: 1})
+   g.Add(&hashVertex{code: 2})
+   g.Add(&hashVertex{code: 3})
+   g.Connect(BasicEdge(
+       &hashVertex{code: 1},
+       &hashVertex{code: 3}))
+
+   actual := strings.TrimSpace(g.String())
+   expected := strings.TrimSpace(testGraphBasicStr)
+   if actual != expected {
+       t.Fatalf("bad: %s", actual)
+   }
+}
+
+type hashVertex struct {
+   code interface{}
+}
+
+func (v *hashVertex) Hashcode() interface{} {
+   return v.code
+}
+
+func (v *hashVertex) Name() string {
+   return fmt.Sprintf("%#v", v.code)
+}

const testGraphBasicStr = `
1
  3

View File

@ -17,22 +17,31 @@ type Hashable interface {
    Hashcode() interface{}
}

+// hashcode returns the hashcode used for set elements.
+func hashcode(v interface{}) interface{} {
+   if h, ok := v.(Hashable); ok {
+       return h.Hashcode()
+   }
+
+   return v
+}
+
// Add adds an item to the set
func (s *Set) Add(v interface{}) {
    s.once.Do(s.init)
-   s.m[s.code(v)] = v
+   s.m[hashcode(v)] = v
}

// Delete removes an item from the set.
func (s *Set) Delete(v interface{}) {
    s.once.Do(s.init)
-   delete(s.m, s.code(v))
+   delete(s.m, hashcode(v))
}

// Include returns true/false of whether a value is in the set.
func (s *Set) Include(v interface{}) bool {
    s.once.Do(s.init)
-   _, ok := s.m[s.code(v)]
+   _, ok := s.m[hashcode(v)]
    return ok
}
@ -73,14 +82,6 @@ func (s *Set) List() []interface{} {
    return r
}

-func (s *Set) code(v interface{}) interface{} {
-   if h, ok := v.(Hashable); ok {
-       return h.Hashcode()
-   }
-
-   return v
-}
-
func (s *Set) init() {
    s.m = make(map[interface{}]interface{})
}
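As a usage sketch of what the package-level hashcode helper buys (the idVertex type here is hypothetical, for illustration only): two distinct pointers that report the same Hashcode now collapse to a single element in Set, and therefore in Graph's edge maps as well.

package dag

import "fmt"

// idVertex is a hypothetical vertex identified by a string ID.
type idVertex struct{ id string }

func (v *idVertex) Hashcode() interface{} { return v.id }

func ExampleSet_hashcode() {
    var s Set
    s.Add(&idVertex{id: "a"})
    s.Add(&idVertex{id: "a"}) // distinct pointer, same hashcode: deduplicated
    fmt.Println(len(s.List()))
    // Output: 1
}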

deps/v0-6-4.json vendored Normal file (440 lines)
View File

@ -0,0 +1,440 @@
{
"ImportPath": "github.com/hashicorp/terraform",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/http",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/management",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck",
"Comment": "v0.0.1",
"Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200"
},
{
"ImportPath": "github.com/armon/circbuf",
"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/efs",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/glacier",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/opsworks",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/rds",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/route53",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sns",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/awslabs/aws-sdk-go/aws",
"Comment": "v0.9.14-3-g308eaa6",
"Rev": "308eaa65c0ddf03c701d511b7d73b3f3620452a1"
},
{
"ImportPath": "github.com/cyberdelia/heroku-go/v3",
"Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b"
},
{
"ImportPath": "github.com/dylanmei/iso8601",
"Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
},
{
"ImportPath": "github.com/dylanmei/winrmtest",
"Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "09604abc82243886001c3f56fd709d4ba603cead"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/archive",
"Comment": "20141209094003-77-g85a782d",
"Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/v1",
"Comment": "20141209094003-77-g85a782d",
"Rev": "85a782d724b87fcd19db1c4aef9d5337a9bb7a0f"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.5.2-325-g5d9530d",
"Rev": "5d9530d7def3be989ba141382f1b9d82583418f4"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
},
{
"ImportPath": "github.com/hashicorp/go-checkpoint",
"Rev": "528ab62f37fa83d4360e8ab2b2c425d6692ef533"
},
{
"ImportPath": "github.com/hashicorp/go-multierror",
"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
},
{
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8"
},
{
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
},
{
"ImportPath": "github.com/hashicorp/yamux",
"Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3"
},
{
"ImportPath": "github.com/imdario/mergo",
"Comment": "0.2.0-5-g61a5285",
"Rev": "61a52852277811e93e06d28e0d0c396284a7730b"
},
{
"ImportPath": "github.com/masterzen/simplexml/dom",
"Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
},
{
"ImportPath": "github.com/masterzen/winrm/soap",
"Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006"
},
{
"ImportPath": "github.com/masterzen/winrm/winrm",
"Rev": "b280be362a0c6af26fbaaa055924fb9c4830b006"
},
{
"ImportPath": "github.com/masterzen/xmlpath",
"Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
},
{
"ImportPath": "github.com/mitchellh/colorstring",
"Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
},
{
"ImportPath": "github.com/mitchellh/go-homedir",
"Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4"
},
{
"ImportPath": "github.com/mitchellh/go-linereader",
"Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
},
{
"ImportPath": "github.com/mitchellh/osext",
"Rev": "0dd3f918b21bec95ace9dc86c7e70266cfc5c702"
},
{
"ImportPath": "github.com/mitchellh/packer/common/uuid",
"Comment": "v0.8.6-76-g88386bc",
"Rev": "88386bc9db1c850306e5c3737f14bef3a2c4050d"
},
{
"ImportPath": "github.com/mitchellh/panicwrap",
"Rev": "1655d88c8ff7495ae9d2c19fd8f445f4657e22b0"
},
{
"ImportPath": "github.com/mitchellh/prefixedio",
"Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
},
{
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
},
{
"ImportPath": "github.com/nu7hatch/gouuid",
"Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
},
{
"ImportPath": "github.com/packer-community/winrmcp/winrmcp",
"Rev": "743b1afe5ee3f6d5ba71a0d50673fa0ba2123d6b"
},
{
"ImportPath": "github.com/packethost/packngo",
"Rev": "496f5c8895c06505fae527830a9e554dc65325f4"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
},
{
"ImportPath": "github.com/pearkes/cloudflare",
"Rev": "19e280b056f3742e535ea12ae92a37ea7767ea82"
},
{
"ImportPath": "github.com/pearkes/digitalocean",
"Rev": "e966f00c2d9de5743e87697ab77c7278f5998914"
},
{
"ImportPath": "github.com/pearkes/dnsimple",
"Rev": "2a807d118c9e52e94819f414a6ec0293b45cad01"
},
{
"ImportPath": "github.com/pearkes/mailgun",
"Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260"
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-681-g8d032cb",
"Rev": "8d032cb1e835a0018269de3d6b53bb24fc77a8c0"
},
{
"ImportPath": "github.com/satori/go.uuid",
"Rev": "08f0718b61e95ddba0ade3346725fe0e4bf28ca6"
},
{
"ImportPath": "github.com/soniah/dnsmadeeasy",
"Comment": "v1.1-2-g5578a8c",
"Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.2.0-28-g6037863",
"Rev": "603786323c18c13dd8b3da3d4f86b1dce4adf126"
},
{
"ImportPath": "github.com/xanzy/go-cloudstack/cloudstack",
"Comment": "v1.2.0-48-g0e6e56f",
"Rev": "0e6e56fc0db3f48f060273f2e2ffe5d8d41b0112"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/pkcs12",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "21c3935a8fc0f954d03e6b8a560c9600ffee38d2"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "ef4eca6b097fad7cec79afcc278d213a6de1c960"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/api/internal",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "e2903ca9e33d6cbaedda541d96996219056e8214"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3"
},
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "4bea1598a0936d6d116506b59a8e1aa962b585c3"
}
]
}

deps/v0-6-5.json vendored Normal file (476 lines)
View File

@ -0,0 +1,476 @@
{
"ImportPath": "github.com/hashicorp/terraform",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/http",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/management",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck",
"Comment": "v0.0.1",
"Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200"
},
{
"ImportPath": "github.com/armon/circbuf",
"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/efs",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/glacier",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/opsworks",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/rds",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/route53",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sns",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/awslabs/aws-sdk-go/aws",
"Comment": "v0.9.15",
"Rev": "7ab6754ddaaa7972ac1c896ddd7f796cc726e79d"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.2.0-246-g8d3ed01",
"Rev": "8d3ed0176c41a5585e040368455fe803fa95511b"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.2.0-246-g8d3ed01",
"Rev": "8d3ed0176c41a5585e040368455fe803fa95511b"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.2.0-246-g8d3ed01",
"Rev": "8d3ed0176c41a5585e040368455fe803fa95511b"
},
{
"ImportPath": "github.com/cyberdelia/heroku-go/v3",
"Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b"
},
{
"ImportPath": "github.com/digitalocean/godo",
"Comment": "v0.9.0-2-gc03bb09",
"Rev": "c03bb099b8dc38e87581902a56885013a0865703"
},
{
"ImportPath": "github.com/dylanmei/iso8601",
"Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
},
{
"ImportPath": "github.com/dylanmei/winrmtest",
"Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "412c004d923b7b89701e7a1632de83f843657a03"
},
{
"ImportPath": "github.com/google/go-querystring/query",
"Rev": "547ef5ac979778feb2f760cdb5f4eae1a2207b86"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/archive",
"Comment": "20141209094003-79-gabffe75",
"Rev": "abffe75c7dff7f6c3344727348a95fe70c519696"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/v1",
"Comment": "20141209094003-79-gabffe75",
"Rev": "abffe75c7dff7f6c3344727348a95fe70c519696"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.5.2-461-g158eabd",
"Rev": "158eabdd6f2408067c1d7656fa10e49434f96480"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
},
{
"ImportPath": "github.com/hashicorp/go-checkpoint",
"Rev": "ee53b27929ebf0a6d217c96d2107c6c09b8bebb3"
},
{
"ImportPath": "github.com/hashicorp/go-getter",
"Rev": "2463fe5ef95a59a4096482fb9390b5683a5c380a"
},
{
"ImportPath": "github.com/hashicorp/go-multierror",
"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
},
{
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8"
},
{
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
},
{
"ImportPath": "github.com/hashicorp/yamux",
"Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3"
},
{
"ImportPath": "github.com/imdario/mergo",
"Comment": "0.2.0-5-g61a5285",
"Rev": "61a52852277811e93e06d28e0d0c396284a7730b"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
},
{
"ImportPath": "github.com/masterzen/simplexml/dom",
"Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
},
{
"ImportPath": "github.com/masterzen/winrm/soap",
"Rev": "e3e57d617b7d9573db6c98567a261916ff53cfb3"
},
{
"ImportPath": "github.com/masterzen/winrm/winrm",
"Rev": "e3e57d617b7d9573db6c98567a261916ff53cfb3"
},
{
"ImportPath": "github.com/masterzen/xmlpath",
"Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
},
{
"ImportPath": "github.com/mitchellh/colorstring",
"Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
},
{
"ImportPath": "github.com/mitchellh/go-homedir",
"Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4"
},
{
"ImportPath": "github.com/mitchellh/go-linereader",
"Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
},
{
"ImportPath": "github.com/mitchellh/osext",
"Rev": "5e2d6d41470f99c881826dedd8c526728b783c9c"
},
{
"ImportPath": "github.com/mitchellh/packer/common/uuid",
"Comment": "v0.8.6-114-gd66268f",
"Rev": "d66268f5f92dc3f785616f9d10f233ece8636e9c"
},
{
"ImportPath": "github.com/mitchellh/panicwrap",
"Rev": "1655d88c8ff7495ae9d2c19fd8f445f4657e22b0"
},
{
"ImportPath": "github.com/mitchellh/prefixedio",
"Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
},
{
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
},
{
"ImportPath": "github.com/nu7hatch/gouuid",
"Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
},
{
"ImportPath": "github.com/packer-community/winrmcp/winrmcp",
"Rev": "3d184cea22ee1c41ec1697e0d830ff0c78f7ea97"
},
{
"ImportPath": "github.com/packethost/packngo",
"Rev": "f03d7dc788a8b57b62d301ccb98c950c325756f8"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
},
{
"ImportPath": "github.com/pearkes/cloudflare",
"Rev": "922f1c75017c54430fb706364d29eff10f64c56d"
},
{
"ImportPath": "github.com/pearkes/dnsimple",
"Rev": "59fa6243d3d5ac56ab0df76be4c6da30821154b0"
},
{
"ImportPath": "github.com/pearkes/mailgun",
"Rev": "5b02e7e9ffee9869f81393e80db138f6ff726260"
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-683-gdc139e8",
"Rev": "dc139e8a4612310304c1c71aa2b07d94ab7bdeaf"
},
{
"ImportPath": "github.com/satori/go.uuid",
"Rev": "08f0718b61e95ddba0ade3346725fe0e4bf28ca6"
},
{
"ImportPath": "github.com/soniah/dnsmadeeasy",
"Comment": "v1.1-2-g5578a8c",
"Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
},
{
"ImportPath": "github.com/tent/http-link-go",
"Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
},
{
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "8a2a3a8c488c3ebd98f422a965260278267a0551"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.2.0-32-gc33a28e",
"Rev": "c33a28ed780856865047dda04412c67f2d55de8e"
},
{
"ImportPath": "github.com/xanzy/go-cloudstack/cloudstack",
"Comment": "v1.2.0-48-g0e6e56f",
"Rev": "0e6e56fc0db3f48f060273f2e2ffe5d8d41b0112"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/pkcs12",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "9946ad7d5eae91d8edca4f54d1a1e130a052e823"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "ef4eca6b097fad7cec79afcc278d213a6de1c960"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/internal",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
},
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
}
]
}

deps/v0-6-6.json vendored Normal file (489 lines)
View File

@ -0,0 +1,489 @@
{
"ImportPath": "github.com/hashicorp/terraform",
"GoVersion": "go1.4.2",
"Packages": [
"./..."
],
"Deps": [
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/http",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/core/tls",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/management",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/Azure/azure-sdk-for-go/storage",
"Comment": "v1.2-261-g3dcabb6",
"Rev": "3dcabb61c225af4013db7af20d4fe430fd09e311"
},
{
"ImportPath": "github.com/apparentlymart/go-cidr/cidr",
"Rev": "a3ebdb999b831ecb6ab8a226e31b07b2b9061c47"
},
{
"ImportPath": "github.com/apparentlymart/go-rundeck-api/rundeck",
"Comment": "v0.0.1",
"Rev": "cddcfbabbe903e9c8df35ff9569dbb8d67789200"
},
{
"ImportPath": "github.com/armon/circbuf",
"Rev": "bbbad097214e2918d8543d5201d12bfd7bca254d"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/endpoints",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/ec2query",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/json/jsonutil",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/jsonrpc",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/query",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/rest",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restjson",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/restxml",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/protocol/xml/xmlutil",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/internal/signer/v4",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/autoscaling",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatch",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/cloudwatchlogs",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/codedeploy",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/directoryservice",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ecs",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/efs",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticache",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elasticsearchservice",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/elb",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/glacier",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/kinesis",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/lambda",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/opsworks",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/rds",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/route53",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sns",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/sqs",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/awslabs/aws-sdk-go/aws",
"Comment": "v0.9.16-1-g66c840e",
"Rev": "66c840e9981dd121a4239fc25e33b6c1c1caa781"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.2.0-261-gae62a77",
"Rev": "ae62a77de61d70f434ed848ba48b44247cb54c94"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.2.0-261-gae62a77",
"Rev": "ae62a77de61d70f434ed848ba48b44247cb54c94"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.2.0-261-gae62a77",
"Rev": "ae62a77de61d70f434ed848ba48b44247cb54c94"
},
{
"ImportPath": "github.com/cyberdelia/heroku-go/v3",
"Rev": "8344c6a3e281a99a693f5b71186249a8620eeb6b"
},
{
"ImportPath": "github.com/digitalocean/godo",
"Comment": "v0.9.0-2-gc03bb09",
"Rev": "c03bb099b8dc38e87581902a56885013a0865703"
},
{
"ImportPath": "github.com/dylanmei/iso8601",
"Rev": "2075bf119b58e5576c6ed9f867b8f3d17f2e54d4"
},
{
"ImportPath": "github.com/dylanmei/winrmtest",
"Rev": "3e9661c52c45dab9a8528966a23d421922fca9b9"
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
"Rev": "44f75219dec4d25d3ac5483d38d3ada7eaf047ab"
},
{
"ImportPath": "github.com/google/go-querystring/query",
"Rev": "547ef5ac979778feb2f760cdb5f4eae1a2207b86"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/archive",
"Comment": "20141209094003-81-g6c9afe8",
"Rev": "6c9afe8bb88099b424db07dea18f434371de8199"
},
{
"ImportPath": "github.com/hashicorp/atlas-go/v1",
"Comment": "20141209094003-81-g6c9afe8",
"Rev": "6c9afe8bb88099b424db07dea18f434371de8199"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.5.2-469-g6a350d5",
"Rev": "6a350d5d19a41f94e0c99a933410e8545c4e7a51"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
"Rev": "7554cd9344cec97297fa6649b055a8c98c2a1e55"
},
{
"ImportPath": "github.com/hashicorp/go-checkpoint",
"Rev": "e4b2dc34c0f698ee04750bf2035d8b9384233e1b"
},
{
"ImportPath": "github.com/hashicorp/go-cleanhttp",
"Rev": "5df5ddc69534f1a4697289f1dca2193fbb40213f"
},
{
"ImportPath": "github.com/hashicorp/go-getter",
"Rev": "2463fe5ef95a59a4096482fb9390b5683a5c380a"
},
{
"ImportPath": "github.com/hashicorp/go-multierror",
"Rev": "d30f09973e19c1dfcd120b2d9c4f168e68d6b5d5"
},
{
"ImportPath": "github.com/hashicorp/go-version",
"Rev": "2b9865f60ce11e527bd1255ba82036d465570aa3"
},
{
"ImportPath": "github.com/hashicorp/hcl",
"Rev": "4de51957ef8d4aba6e285ddfc587633bbfc7c0e8"
},
{
"ImportPath": "github.com/hashicorp/logutils",
"Rev": "0dc08b1671f34c4250ce212759ebd880f743d883"
},
{
"ImportPath": "github.com/hashicorp/yamux",
"Rev": "ddcd0a6ec7c55e29f235e27935bf98d302281bd3"
},
{
"ImportPath": "github.com/imdario/mergo",
"Comment": "0.2.0-5-g61a5285",
"Rev": "61a52852277811e93e06d28e0d0c396284a7730b"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "6e7f843663477789fac7c02def0d0909e969b4e5"
},
{
"ImportPath": "github.com/masterzen/simplexml/dom",
"Rev": "95ba30457eb1121fa27753627c774c7cd4e90083"
},
{
"ImportPath": "github.com/masterzen/winrm/soap",
"Rev": "e3e57d617b7d9573db6c98567a261916ff53cfb3"
},
{
"ImportPath": "github.com/masterzen/winrm/winrm",
"Rev": "e3e57d617b7d9573db6c98567a261916ff53cfb3"
},
{
"ImportPath": "github.com/masterzen/xmlpath",
"Rev": "13f4951698adc0fa9c1dda3e275d489a24201161"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
},
{
"ImportPath": "github.com/mitchellh/colorstring",
"Rev": "8631ce90f28644f54aeedcb3e389a85174e067d1"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
"Rev": "6fc66267e9da7d155a9d3bd489e00dad02666dc6"
},
{
"ImportPath": "github.com/mitchellh/go-homedir",
"Rev": "df55a15e5ce646808815381b3db47a8c66ea62f4"
},
{
"ImportPath": "github.com/mitchellh/go-linereader",
"Rev": "07bab5fdd9580500aea6ada0e09df4aa28e68abd"
},
{
"ImportPath": "github.com/mitchellh/mapstructure",
"Rev": "281073eb9eb092240d33ef253c404f1cca550309"
},
{
"ImportPath": "github.com/mitchellh/osext",
"Rev": "5e2d6d41470f99c881826dedd8c526728b783c9c"
},
{
"ImportPath": "github.com/mitchellh/packer/common/uuid",
"Comment": "v0.8.6-128-g8e63ce1",
"Rev": "8e63ce13028ed6a3204d7ed210c4790ea11d7db9"
},
{
"ImportPath": "github.com/mitchellh/panicwrap",
"Rev": "1655d88c8ff7495ae9d2c19fd8f445f4657e22b0"
},
{
"ImportPath": "github.com/mitchellh/prefixedio",
"Rev": "89d9b535996bf0a185f85b59578f2e245f9e1724"
},
{
"ImportPath": "github.com/mitchellh/reflectwalk",
"Rev": "eecf4c70c626c7cfbb95c90195bc34d386c74ac6"
},
{
"ImportPath": "github.com/nu7hatch/gouuid",
"Rev": "179d4d0c4d8d407a32af483c2354df1d2c91e6c3"
},
{
"ImportPath": "github.com/packer-community/winrmcp/winrmcp",
"Rev": "3d184cea22ee1c41ec1697e0d830ff0c78f7ea97"
},
{
"ImportPath": "github.com/packethost/packngo",
"Rev": "f03d7dc788a8b57b62d301ccb98c950c325756f8"
},
{
"ImportPath": "github.com/pborman/uuid",
"Rev": "cccd189d45f7ac3368a0d127efb7f4d08ae0b655"
},
{
"ImportPath": "github.com/pearkes/cloudflare",
"Rev": "3d4cd12a4c3a7fc29b338b774f7f8b7e3d5afc2e"
},
{
"ImportPath": "github.com/pearkes/dnsimple",
"Rev": "78996265f576c7580ff75d0cb2c606a61883ceb8"
},
{
"ImportPath": "github.com/pearkes/mailgun",
"Rev": "b88605989c4141d22a6d874f78800399e5bb7ac2"
},
{
"ImportPath": "github.com/rackspace/gophercloud",
"Comment": "v1.0.0-685-g63ee53d",
"Rev": "63ee53d682169b50b8dfaca88722ba19bd5b17a6"
},
{
"ImportPath": "github.com/satori/go.uuid",
"Rev": "08f0718b61e95ddba0ade3346725fe0e4bf28ca6"
},
{
"ImportPath": "github.com/soniah/dnsmadeeasy",
"Comment": "v1.1-2-g5578a8c",
"Rev": "5578a8c15e33958c61cf7db720b6181af65f4a9e"
},
{
"ImportPath": "github.com/tent/http-link-go",
"Rev": "ac974c61c2f990f4115b119354b5e0b47550e888"
},
{
"ImportPath": "github.com/ugorji/go/codec",
"Rev": "8a2a3a8c488c3ebd98f422a965260278267a0551"
},
{
"ImportPath": "github.com/vaughan0/go-ini",
"Rev": "a98ad7ee00ec53921f08832bc06ecf7fd600e6a1"
},
{
"ImportPath": "github.com/vmware/govmomi",
"Comment": "v0.2.0-36-g6be2410",
"Rev": "6be2410334b7be4f6f8962206e49042207f99673"
},
{
"ImportPath": "github.com/xanzy/go-cloudstack/cloudstack",
"Comment": "v1.2.0-48-g0e6e56f",
"Rev": "0e6e56fc0db3f48f060273f2e2ffe5d8d41b0112"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/pkcs12",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "c8b9e6388ef638d5a8a9d865c634befdc46a6784"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "2cba614e8ff920c60240d2677bc019af32ee04e5"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "038cb4adce85ed41e285c2e7cc6221a92bfa44aa"
},
{
"ImportPath": "google.golang.org/api/compute/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/container/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/dns/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/googleapi",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/internal",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/api/storage/v1",
"Rev": "c83ee8e9b7e6c40a486c0992a963ea8b6911de67"
},
{
"ImportPath": "google.golang.org/cloud/compute/metadata",
"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
},
{
"ImportPath": "google.golang.org/cloud/internal",
"Rev": "2400193c85c3561d13880d34e0e10c4315bb02af"
}
]
}

View File

@ -10,7 +10,7 @@ If you need to use existing security groups and subnets, remove the sg.tf and su
Pass the password variable through your ENV variable.

-Several paraneters are externalized, review the different variables.tf files and change them to fit your needs. Carefully review the CIDR blocks, egress/ingress rules, availability zones that are very specific to your account.
+Several parameters are externalized, review the different variables.tf files and change them to fit your needs. Carefully review the CIDR blocks, egress/ingress rules, availability zones that are very specific to your account.

Once ready run 'terraform plan' to review. At the minimum, provide the vpc_id as input variable.

View File

@ -98,7 +98,7 @@ resource "google_compute_forwarding_rule" "fr2_udp4500" {
}

# Each tunnel is responsible for encrypting and decrypting traffic exiting
-# and leaving it's associated gateway
+# and leaving its associated gateway
resource "google_compute_vpn_tunnel" "tunnel1" {
  name   = "tunnel1"
  region = "${var.region1}"

View File

@ -11,6 +11,7 @@ import (
    "strings"
    "testing"

+   "github.com/hashicorp/go-getter"
    "github.com/hashicorp/terraform/config/module"
    "github.com/hashicorp/terraform/terraform"
)
@ -198,7 +199,7 @@ func testStep(
    }

    // Load the modules
-   modStorage := &module.FolderStorage{
+   modStorage := &getter.FolderStorage{
        StorageDir: filepath.Join(cfgPath, ".tfmodules"),
    }
    err = mod.Load(modStorage, module.GetModeGet)
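For context, a short sketch of the storage type now taken from go-getter. The paths and the cache key are hypothetical, and this assumes the vendored go-getter revision exposes the same Dir/Get pair that config/module previously defined:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/go-getter"
)

func main() {
    // FolderStorage caches each fetched module source under a
    // subdirectory of StorageDir derived from its cache key.
    storage := &getter.FolderStorage{StorageDir: "/tmp/.tfmodules"}

    // Fetch a module under a key (the final argument controls updates).
    if err := storage.Get("root.vpc", "github.com/hashicorp/example//modules/vpc", false); err != nil {
        log.Fatal(err)
    }

    // Resolve where the module landed on disk.
    dir, found, err := storage.Dir("root.vpc")
    if err != nil || !found {
        log.Fatal("module not found in local storage")
    }
    fmt.Println(dir)
}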

View File

@ -1178,8 +1178,25 @@ func (m schemaMap) validatePrimitive(
    raw interface{},
    schema *Schema,
    c *terraform.ResourceConfig) ([]string, []error) {
+
+   // Catch if the user gave a complex type where a primitive was
+   // expected, so we can return a friendly error message that
+   // doesn't contain Go type system terminology.
+   switch reflect.ValueOf(raw).Type().Kind() {
+   case reflect.Slice:
+       return nil, []error{
+           fmt.Errorf("%s must be a single value, not a list", k),
+       }
+   case reflect.Map:
+       return nil, []error{
+           fmt.Errorf("%s must be a single value, not a map", k),
+       }
+   default: // ok
+   }
+
    if c.IsComputed(k) {
-       // If the key is being computed, then it is not an error
+       // If the key is being computed, then it is not an error as
+       // long as it's not a slice or map.
        return nil, nil
    }

View File

@ -3409,6 +3409,36 @@ func TestSchemaMap_Validate(t *testing.T) {
        Err: true,
    },

+   "Bad, should not allow lists to be assigned to string attributes": {
+       Schema: map[string]*Schema{
+           "availability_zone": &Schema{
+               Type:     TypeString,
+               Required: true,
+           },
+       },
+
+       Config: map[string]interface{}{
+           "availability_zone": []interface{}{"foo", "bar", "baz"},
+       },
+
+       Err: true,
+   },
+
+   "Bad, should not allow maps to be assigned to string attributes": {
+       Schema: map[string]*Schema{
+           "availability_zone": &Schema{
+               Type:     TypeString,
+               Required: true,
+           },
+       },
+
+       Config: map[string]interface{}{
+           "availability_zone": map[string]interface{}{"foo": "bar", "baz": "thing"},
+       },
+
+       Err: true,
+   },
+
    "Deprecated attribute usage generates warning, but not error": {
        Schema: map[string]*Schema{
            "old_news": &Schema{

View File

@ -36,14 +36,6 @@ shasum -a256 * > ./terraform_${VERSION}_SHA256SUMS
popd

# Upload
-for ARCHIVE in ./pkg/dist/*; do
-  ARCHIVE_NAME=$(basename ${ARCHIVE})
-  echo Uploading: $ARCHIVE_NAME
-  curl \
-    -T ${ARCHIVE} \
-    -umitchellh:${BINTRAY_API_KEY} \
-    "https://api.bintray.com/content/mitchellh/terraform/terraform/${VERSION}/${ARCHIVE_NAME}"
-done
+hc-releases -upload=./pkg/dist

exit 0

View File

@ -6,11 +6,15 @@ import (
    "encoding/base64"
    "fmt"
    "io"
+   "log"
    "net/http"
    "net/url"
    "os"
    "path"
    "strings"
+
+   "github.com/hashicorp/go-cleanhttp"
+   "github.com/hashicorp/terraform/terraform"
)

const (
@ -73,6 +77,9 @@ type AtlasClient struct {
    Name        string
    AccessToken string
    RunId       string
+   HTTPClient  *http.Client
+
+   conflictHandlingAttempted bool
}

func (c *AtlasClient) Get() (*Payload, error) {
@ -83,7 +90,8 @@ func (c *AtlasClient) Get() (*Payload, error) {
    }

    // Request the url
-   resp, err := http.DefaultClient.Do(req)
+   client := c.http()
+   resp, err := client.Do(req)
    if err != nil {
        return nil, err
    }
@ -161,7 +169,8 @@ func (c *AtlasClient) Put(state []byte) error {
    req.ContentLength = int64(len(state))

    // Make the request
-   resp, err := http.DefaultClient.Do(req)
+   client := c.http()
+   resp, err := client.Do(req)
    if err != nil {
        return fmt.Errorf("Failed to upload state: %v", err)
    }
@ -171,6 +180,8 @@ func (c *AtlasClient) Put(state []byte) error {
    switch resp.StatusCode {
    case http.StatusOK:
        return nil
+   case http.StatusConflict:
+       return c.handleConflict(c.readBody(resp.Body), state)
    default:
        return fmt.Errorf(
            "HTTP error: %d\n\nBody: %s",
@ -186,7 +197,8 @@ func (c *AtlasClient) Delete() error {
    }

    // Make the request
-   resp, err := http.DefaultClient.Do(req)
+   client := c.http()
+   resp, err := client.Do(req)
    if err != nil {
        return fmt.Errorf("Failed to delete state: %v", err)
    }
@ -236,3 +248,74 @@ func (c *AtlasClient) url() *url.URL {
        RawQuery: values.Encode(),
    }
}
+
+func (c *AtlasClient) http() *http.Client {
+   if c.HTTPClient != nil {
+       return c.HTTPClient
+   }
+   return cleanhttp.DefaultClient()
+}
+
+// Atlas returns an HTTP 409 - Conflict if the pushed state reports the same
+// Serial number but the checksum of the raw content differs. This can
+// sometimes happen when Terraform changes state representation internally
+// between versions in a way that's semantically neutral but affects the JSON
+// output and therefore the checksum.
+//
+// Here we detect and handle this situation by ticking the serial and retrying
+// iff for the previous state and the proposed state:
+//
+//  * the serials match
+//  * the parsed states are Equal (semantically equivalent)
+//
+// In other words, in this situation Terraform can override Atlas's detected
+// conflict by asserting that the state it is pushing is indeed correct.
+func (c *AtlasClient) handleConflict(msg string, state []byte) error {
+   log.Printf("[DEBUG] Handling Atlas conflict response: %s", msg)
+   if c.conflictHandlingAttempted {
+       log.Printf("[DEBUG] Already attempted conflict resolution; returning conflict.")
+   } else {
+       c.conflictHandlingAttempted = true
+       log.Printf("[DEBUG] Atlas reported conflict, checking for equivalent states.")
+
+       payload, err := c.Get()
+       if err != nil {
+           return conflictHandlingError(err)
+       }
+
+       currentState, err := terraform.ReadState(bytes.NewReader(payload.Data))
+       if err != nil {
+           return conflictHandlingError(err)
+       }
+
+       proposedState, err := terraform.ReadState(bytes.NewReader(state))
+       if err != nil {
+           return conflictHandlingError(err)
+       }
+
+       if statesAreEquivalent(currentState, proposedState) {
+           log.Printf("[DEBUG] States are equivalent, incrementing serial and retrying.")
+           proposedState.Serial++
+           var buf bytes.Buffer
+           if err := terraform.WriteState(proposedState, &buf); err != nil {
+               return conflictHandlingError(err)
+           }
+           return c.Put(buf.Bytes())
+       } else {
+           log.Printf("[DEBUG] States are not equivalent, returning conflict.")
+       }
+   }
+
+   return fmt.Errorf(
+       "Atlas detected a remote state conflict.\n\nMessage: %s", msg)
+}
+
+func conflictHandlingError(err error) error {
+   return fmt.Errorf(
+       "Error while handling a conflict response from Atlas: %s", err)
+}
+
+func statesAreEquivalent(current, proposed *terraform.State) bool {
+   return current.Serial == proposed.Serial && current.Equal(proposed)
+}
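To make the retry rule concrete, here is a small sketch, not part of the change itself, of the decision handleConflict encodes; remoteData and localData are hypothetical serialized states:

package remote

import (
    "bytes"

    "github.com/hashicorp/terraform/terraform"
)

// resolveConflict reports whether a 409 from Atlas may be overridden
// by ticking the serial and retrying, per the rule documented above.
func resolveConflict(remoteData, localData []byte) (retry bool, err error) {
    current, err := terraform.ReadState(bytes.NewReader(remoteData))
    if err != nil {
        return false, err
    }
    proposed, err := terraform.ReadState(bytes.NewReader(localData))
    if err != nil {
        return false, err
    }

    if current.Serial == proposed.Serial && current.Equal(proposed) {
        // Semantically neutral difference (e.g. module ordering):
        // tick the serial and retry the Put, at most once.
        proposed.Serial++
        return true, nil
    }

    // Same serial with real drift, or differing serials: surface
    // the conflict to the user instead of retrying.
    return false, nil
}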

View File

@ -1,9 +1,15 @@
package remote

import (
+   "bytes"
+   "crypto/md5"
    "net/http"
+   "net/http/httptest"
    "os"
    "testing"
+   "time"
+
+   "github.com/hashicorp/terraform/terraform"
)

func TestAtlasClient_impl(t *testing.T) {
@ -30,3 +36,259 @@ func TestAtlasClient(t *testing.T) {
    testClient(t, client)
}
func TestAtlasClient_ReportedConflictEqualStates(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateModuleOrderChange)
srv := fakeAtlas.Server()
defer srv.Close()
client, err := atlasFactory(map[string]string{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
if err != nil {
t.Fatalf("err: %s", err)
}
state, err := terraform.ReadState(bytes.NewReader(testStateModuleOrderChange))
if err != nil {
t.Fatalf("err: %s", err)
}
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestAtlasClient_NoConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
srv := fakeAtlas.Server()
defer srv.Close()
client, err := atlasFactory(map[string]string{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
if err != nil {
t.Fatalf("err: %s", err)
}
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
fakeAtlas.NoConflictAllowed(true)
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestAtlasClient_LegitimateConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
srv := fakeAtlas.Server()
defer srv.Close()
client, err := atlasFactory(map[string]string{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
if err != nil {
t.Fatalf("err: %s", err)
}
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
// Changing the state but not the serial. Should generate a conflict.
state.RootModule().Outputs["drift"] = "happens"
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
if err := client.Put(stateJson.Bytes()); err == nil {
t.Fatal("Expected error from state conflict, got none.")
}
}
func TestAtlasClient_UnresolvableConflict(t *testing.T) {
fakeAtlas := newFakeAtlas(t, testStateSimple)
// Something unexpected causes Atlas to conflict in a way that we can't fix.
fakeAtlas.AlwaysConflict(true)
srv := fakeAtlas.Server()
defer srv.Close()
client, err := atlasFactory(map[string]string{
"access_token": "sometoken",
"name": "someuser/some-test-remote-state",
"address": srv.URL,
})
if err != nil {
t.Fatalf("err: %s", err)
}
state, err := terraform.ReadState(bytes.NewReader(testStateSimple))
if err != nil {
t.Fatalf("err: %s", err)
}
var stateJson bytes.Buffer
if err := terraform.WriteState(state, &stateJson); err != nil {
t.Fatalf("err: %s", err)
}
doneCh := make(chan struct{})
go func() {
defer close(doneCh)
if err := client.Put(stateJson.Bytes()); err == nil {
t.Fatal("Expected error from state conflict, got none.")
}
}()
select {
case <-doneCh:
// OK
case <-time.After(50 * time.Millisecond):
t.Fatalf("Timed out after 50ms, probably because retrying infinitely.")
}
}
// Stub Atlas HTTP API for a given state JSON string; does checksum-based
// conflict detection equivalent to Atlas's.
type fakeAtlas struct {
state []byte
t *testing.T
// Used to test that we only do the special conflict handling retry once.
alwaysConflict bool
// Used to fail the test immediately if a conflict happens.
noConflictAllowed bool
}
func newFakeAtlas(t *testing.T, state []byte) *fakeAtlas {
return &fakeAtlas{
state: state,
t: t,
}
}
func (f *fakeAtlas) Server() *httptest.Server {
return httptest.NewServer(http.HandlerFunc(f.handler))
}
func (f *fakeAtlas) CurrentState() *terraform.State {
currentState, err := terraform.ReadState(bytes.NewReader(f.state))
if err != nil {
f.t.Fatalf("err: %s", err)
}
return currentState
}
func (f *fakeAtlas) CurrentSerial() int64 {
return f.CurrentState().Serial
}
func (f *fakeAtlas) CurrentSum() [md5.Size]byte {
return md5.Sum(f.state)
}
func (f *fakeAtlas) AlwaysConflict(b bool) {
f.alwaysConflict = b
}
func (f *fakeAtlas) NoConflictAllowed(b bool) {
f.noConflictAllowed = b
}
func (f *fakeAtlas) handler(resp http.ResponseWriter, req *http.Request) {
switch req.Method {
case "GET":
// Respond with the current stored state.
resp.Header().Set("Content-Type", "application/json")
resp.Write(f.state)
case "PUT":
var buf bytes.Buffer
buf.ReadFrom(req.Body)
sum := md5.Sum(buf.Bytes())
state, err := terraform.ReadState(&buf)
if err != nil {
f.t.Fatalf("err: %s", err)
}
conflict := f.CurrentSerial() == state.Serial && f.CurrentSum() != sum
conflict = conflict || f.alwaysConflict
if conflict {
if f.noConflictAllowed {
f.t.Fatal("Got conflict when NoConflictAllowed was set.")
}
http.Error(resp, "Conflict", 409)
} else {
f.state = buf.Bytes()
resp.WriteHeader(200)
}
}
}
// This is a tfstate file with the module order changed, which is a structural
// but not a semantic difference. Terraform will sort these modules as it
// loads the state.
var testStateModuleOrderChange = []byte(
`{
"version": 1,
"serial": 1,
"modules": [
{
"path": [
"root",
"child2",
"grandchild"
],
"outputs": {
"foo": "bar2"
},
"resources": null
},
{
"path": [
"root",
"child1",
"grandchild"
],
"outputs": {
"foo": "bar1"
},
"resources": null
}
]
}
`)
var testStateSimple = []byte(
`{
"version": 1,
"serial": 1,
"modules": [
{
"path": [
"root"
],
"outputs": {
"foo": "bar"
},
"resources": null
}
]
}
`)

state/remote/etcd.go Normal file (78 lines)
View File

@ -0,0 +1,78 @@
package remote
import (
"crypto/md5"
"fmt"
"strings"
etcdapi "github.com/coreos/etcd/client"
"golang.org/x/net/context"
)
func etcdFactory(conf map[string]string) (Client, error) {
path, ok := conf["path"]
if !ok {
return nil, fmt.Errorf("missing 'path' configuration")
}
endpoints, ok := conf["endpoints"]
if !ok || endpoints == "" {
return nil, fmt.Errorf("missing 'endpoints' configuration")
}
config := etcdapi.Config{
Endpoints: strings.Split(endpoints, " "),
}
if username, ok := conf["username"]; ok && username != "" {
config.Username = username
}
if password, ok := conf["password"]; ok && password != "" {
config.Password = password
}
client, err := etcdapi.New(config)
if err != nil {
return nil, err
}
return &EtcdClient{
Client: client,
Path: path,
}, nil
}
// EtcdClient is a remote client that stores data in etcd.
type EtcdClient struct {
Client etcdapi.Client
Path string
}
func (c *EtcdClient) Get() (*Payload, error) {
resp, err := etcdapi.NewKeysAPI(c.Client).Get(context.Background(), c.Path, &etcdapi.GetOptions{Quorum: true})
if err != nil {
if err, ok := err.(etcdapi.Error); ok && err.Code == etcdapi.ErrorCodeKeyNotFound {
return nil, nil
}
return nil, err
}
if resp.Node.Dir {
return nil, fmt.Errorf("path is a directory")
}
data := []byte(resp.Node.Value)
md5 := md5.Sum(data)
return &Payload{
Data: data,
MD5: md5[:],
}, nil
}
func (c *EtcdClient) Put(data []byte) error {
_, err := etcdapi.NewKeysAPI(c.Client).Set(context.Background(), c.Path, string(data), nil)
return err
}
func (c *EtcdClient) Delete() error {
_, err := etcdapi.NewKeysAPI(c.Client).Delete(context.Background(), c.Path, nil)
return err
}
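A usage sketch for the new backend (the endpoint URLs are illustrative, and this assumes a log import; note that multiple endpoints are space-separated, matching the strings.Split above):

// Hypothetical direct use of the new client via its factory.
client, err := etcdFactory(map[string]string{
    "path":      "tf/state",
    "endpoints": "http://127.0.0.1:2379 http://127.0.0.1:4001",
})
if err != nil {
    log.Fatal(err)
}
if err := client.Put([]byte(`{"version": 1, "serial": 1}`)); err != nil {
    log.Fatal(err)
}
payload, err := client.Get() // a nil payload means the key does not exist yet

From the CLI, the same client would be reached through terraform remote config with -backend=etcd and matching -backend-config values, assuming the factory is registered under the name etcd as the changelog entry suggests.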

state/remote/etcd_test.go Normal file (38 lines)
View File

@ -0,0 +1,38 @@
package remote
import (
"fmt"
"os"
"testing"
"time"
)
func TestEtcdClient_impl(t *testing.T) {
var _ Client = new(EtcdClient)
}
func TestEtcdClient(t *testing.T) {
endpoint := os.Getenv("ETCD_ENDPOINT")
if endpoint == "" {
t.Skipf("skipping; ETCD_ENDPOINT must be set")
}
config := map[string]string{
"endpoints": endpoint,
"path": fmt.Sprintf("tf-unit/%s", time.Now().String()),
}
if username := os.Getenv("ETCD_USERNAME"); username != "" {
config["username"] = username
}
if password := os.Getenv("ETCD_PASSWORD"); password != "" {
config["password"] = password
}
client, err := etcdFactory(config)
if err != nil {
t.Fatalf("Error for valid config: %s", err)
}
testClient(t, client)
}

View File

@ -8,6 +8,8 @@ import (
    "net/http/httptest"
    "net/url"
    "testing"
+
+   "github.com/hashicorp/go-cleanhttp"
)

func TestHTTPClient_impl(t *testing.T) {
@ -24,7 +26,7 @@ func TestHTTPClient(t *testing.T) {
        t.Fatalf("err: %s", err)
    }

-   client := &HTTPClient{URL: url, Client: http.DefaultClient}
+   client := &HTTPClient{URL: url, Client: cleanhttp.DefaultClient()}
    testClient(t, client)
}

Some files were not shown because too many files have changed in this diff.