diff --git a/CHANGELOG.md b/CHANGELOG.md index d4209dc0d..24336b5ee 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -12,6 +12,10 @@ BACKWARDS INCOMPATIBILITIES: FEATURES: * **New provider: `dme` (DNSMadeEasy)** [GH-855] + * **New provider: `docker` (Docker)** - Manage container lifecycle + using the standard Docker API. [GH-855] + * **New provider: `openstack` (OpenStack)** - Interact with the many resources + provided by OpenStack. [GH-924] * **New command: `taint`** - Manually mark a resource as tainted, causing a destroy and recreate on the next plan/apply. * **New resource: `aws_vpn_gateway`** [GH-1137] @@ -24,8 +28,8 @@ FEATURES: or system killing Terraform. * **Math operations** in interpolations. You can now do things like `${count.index+1}`. [GH-1068] - * **New AWS SDK:** Move to `aws-sdk-go` (hashicorp/aws-sdk-go), - a fork of the offical `awslabs` repo. We forked for stability while + * **New AWS SDK:** Move to `aws-sdk-go` (hashicorp/aws-sdk-go), + a fork of the offical `awslabs` repo. We forked for stability while `awslabs` refactored the library, and will move back to the officially supported version in the next release. @@ -39,6 +43,7 @@ IMPROVEMENTS: * **New config function: `split`** - Split a value based on a delimiter. This is useful for faking lists as parameters to modules. * **New resource: `digitalocean_ssh_key`** [GH-1074] + * config: Expand `~` with homedir in `file()` paths [GH-1338] * core: The serial of the state is only updated if there is an actual change. This will lower the amount of state changing on things like refresh. @@ -48,9 +53,9 @@ IMPROVEMENTS: automatically done initially. * providers/google: Add `size` option to disk blocks for instances. [GH-1284] * providers/aws: Improve support for tagging resources. - * providers/aws: Add a short syntax for Route 53 Record names, e.g. + * providers/aws: Add a short syntax for Route 53 Record names, e.g. `www` instead of `www.example.com`. 
- * providers/aws: Improve dependency violation error handling, when deleting + * providers/aws: Improve dependency violation error handling, when deleting Internet Gateways or Auto Scaling groups [GH-1325]. BUG FIXES: @@ -72,15 +77,15 @@ BUG FIXES: * providers/aws: Longer wait times for route53 records (30 mins). [GH-1164] * providers/aws: Fix support for TXT records in Route 53. [GH-1213] * providers/aws: Fix support for wildcard records in Route 53. [GH-1222] - * providers/aws: Fix issue with ignoring the 'self' attribute of a + * providers/aws: Fix issue with ignoring the 'self' attribute of a Security Group rule. [GH-1223] - * providers/aws: Fix issue with `sql_mode` in RDS parameter group always + * providers/aws: Fix issue with `sql_mode` in RDS parameter group always causing an update. [GH-1225] - * providers/aws: Fix dependency violation with subnets and security groups + * providers/aws: Fix dependency violation with subnets and security groups [GH-1252] - * providers/aws: Fix issue with refreshing `db_subnet_groups` causing an error + * providers/aws: Fix issue with refreshing `db_subnet_groups` causing an error instead of updating state [GH-1254] - * providers/aws: Prevent empty string to be used as default + * providers/aws: Prevent empty string to be used as default `health_check_type` [GH-1052] * providers/aws: Add tags on AWS IG creation, not just on update [GH-1176] * providers/digitalocean: Waits until droplet is ready to be destroyed [GH-1057] diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 87f5ca66d..f5554557f 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -53,8 +53,8 @@ If you have never worked with Go before, you will have to complete the following steps in order to be able to compile and test Terraform (or use the Vagrantfile in this repo to stand up a dev VM). -1. Install Go. Make sure the Go version is at least Go 1.2. Terraform will not work with anything less than - Go 1.2. 
On a Mac, you can `brew install go` to install Go 1.2. +1. Install Go. Make sure the Go version is at least Go 1.4. Terraform will not work with anything less than + Go 1.4. On a Mac, you can `brew install go` to install Go 1.4. 2. Set and export the `GOPATH` environment variable and update your `PATH`. For example, you can add to your `.bash_profile`. diff --git a/builtin/bins/provider-docker/main.go b/builtin/bins/provider-docker/main.go new file mode 100644 index 000000000..a54af4c02 --- /dev/null +++ b/builtin/bins/provider-docker/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/docker" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: docker.Provider, + }) +} diff --git a/builtin/bins/provider-docker/main_test.go b/builtin/bins/provider-docker/main_test.go new file mode 100644 index 000000000..06ab7d0f9 --- /dev/null +++ b/builtin/bins/provider-docker/main_test.go @@ -0,0 +1 @@ +package main diff --git a/builtin/bins/provider-openstack/main.go b/builtin/bins/provider-openstack/main.go new file mode 100644 index 000000000..f897f1c55 --- /dev/null +++ b/builtin/bins/provider-openstack/main.go @@ -0,0 +1,12 @@ +package main + +import ( + "github.com/hashicorp/terraform/builtin/providers/openstack" + "github.com/hashicorp/terraform/plugin" +) + +func main() { + plugin.Serve(&plugin.ServeOpts{ + ProviderFunc: openstack.Provider, + }) +} diff --git a/builtin/providers/aws/resource_aws_autoscaling_group.go b/builtin/providers/aws/resource_aws_autoscaling_group.go index de3bbe9cc..60b22ff26 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group.go @@ -287,7 +287,12 @@ func resourceAwsAutoscalingGroupDelete(d *schema.ResourceData, meta interface{}) return err } - return nil + return resource.Retry(5*time.Minute, func() error { + if g, _ = getAwsAutoscalingGroup(d, meta); g != nil { + 
return fmt.Errorf("Auto Scaling Group still exists") + } + return nil + }) } func getAwsAutoscalingGroup( diff --git a/builtin/providers/aws/resource_aws_autoscaling_group_test.go b/builtin/providers/aws/resource_aws_autoscaling_group_test.go index 661e71fe8..09a4d73a6 100644 --- a/builtin/providers/aws/resource_aws_autoscaling_group_test.go +++ b/builtin/providers/aws/resource_aws_autoscaling_group_test.go @@ -26,7 +26,7 @@ func TestAccAWSAutoScalingGroup_basic(t *testing.T) { testAccCheckAWSAutoScalingGroupExists("aws_autoscaling_group.bar", &group), testAccCheckAWSAutoScalingGroupAttributes(&group), resource.TestCheckResourceAttr( - "aws_autoscaling_group.bar", "availability_zones.1807834199", "us-west-2a"), + "aws_autoscaling_group.bar", "availability_zones.2487133097", "us-west-2a"), resource.TestCheckResourceAttr( "aws_autoscaling_group.bar", "name", "foobar3-terraform-test"), resource.TestCheckResourceAttr( diff --git a/builtin/providers/aws/resource_aws_db_instance.go b/builtin/providers/aws/resource_aws_db_instance.go index 14deff732..0cf2e4202 100644 --- a/builtin/providers/aws/resource_aws_db_instance.go +++ b/builtin/providers/aws/resource_aws_db_instance.go @@ -319,7 +319,11 @@ func resourceAwsDbInstanceRead(d *schema.ResourceData, meta interface{}) error { return nil } - d.Set("name", *v.DBName) + if v.DBName != nil { + d.Set("name", *v.DBName) + } else { + d.Set("name", "") + } d.Set("username", *v.MasterUsername) d.Set("engine", *v.Engine) d.Set("engine_version", *v.EngineVersion) diff --git a/builtin/providers/aws/resource_aws_route53_zone.go b/builtin/providers/aws/resource_aws_route53_zone.go index 6d9914b7f..b60c91a79 100644 --- a/builtin/providers/aws/resource_aws_route53_zone.go +++ b/builtin/providers/aws/resource_aws_route53_zone.go @@ -16,6 +16,7 @@ func resourceAwsRoute53Zone() *schema.Resource { return &schema.Resource{ Create: resourceAwsRoute53ZoneCreate, Read: resourceAwsRoute53ZoneRead, + Update: resourceAwsRoute53ZoneUpdate, Delete: 
resourceAwsRoute53ZoneDelete, Schema: map[string]*schema.Schema{ @@ -29,6 +30,8 @@ func resourceAwsRoute53Zone() *schema.Resource { Type: schema.TypeString, Computed: true, }, + + "tags": tagsSchema(), }, } } @@ -72,7 +75,7 @@ func resourceAwsRoute53ZoneCreate(d *schema.ResourceData, meta interface{}) erro if err != nil { return err } - return nil + return resourceAwsRoute53ZoneUpdate(d, meta) } func resourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error { @@ -87,9 +90,41 @@ func resourceAwsRoute53ZoneRead(d *schema.ResourceData, meta interface{}) error return err } + // get tags + req := &route53.ListTagsForResourceRequest{ + ResourceID: aws.String(d.Id()), + ResourceType: aws.String("hostedzone"), + } + + resp, err := r53.ListTagsForResource(req) + if err != nil { + return err + } + + var tags []route53.Tag + if resp.ResourceTagSet != nil { + tags = resp.ResourceTagSet.Tags + } + + if err := d.Set("tags", tagsToMapR53(tags)); err != nil { + return err + } + return nil } +func resourceAwsRoute53ZoneUpdate(d *schema.ResourceData, meta interface{}) error { + conn := meta.(*AWSClient).r53conn + + if err := setTagsR53(conn, d); err != nil { + return err + } else { + d.SetPartial("tags") + } + + return resourceAwsRoute53ZoneRead(d, meta) +} + func resourceAwsRoute53ZoneDelete(d *schema.ResourceData, meta interface{}) error { r53 := meta.(*AWSClient).r53conn diff --git a/builtin/providers/aws/resource_aws_route53_zone_test.go b/builtin/providers/aws/resource_aws_route53_zone_test.go index fa78634cf..0669f88b1 100644 --- a/builtin/providers/aws/resource_aws_route53_zone_test.go +++ b/builtin/providers/aws/resource_aws_route53_zone_test.go @@ -63,6 +63,9 @@ func TestCleanChangeID(t *testing.T) { } func TestAccRoute53Zone(t *testing.T) { + var zone route53.HostedZone + var td route53.ResourceTagSet + resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, @@ -71,7 +74,9 @@ func TestAccRoute53Zone(t 
*testing.T) { resource.TestStep{ Config: testAccRoute53ZoneConfig, Check: resource.ComposeTestCheckFunc( - testAccCheckRoute53ZoneExists("aws_route53_zone.main"), + testAccCheckRoute53ZoneExists("aws_route53_zone.main", &zone), + testAccLoadTagsR53(&zone, &td), + testAccCheckTagsR53(&td.Tags, "foo", "bar"), ), }, }, @@ -93,7 +98,7 @@ func testAccCheckRoute53ZoneDestroy(s *terraform.State) error { return nil } -func testAccCheckRoute53ZoneExists(n string) resource.TestCheckFunc { +func testAccCheckRoute53ZoneExists(n string, zone *route53.HostedZone) resource.TestCheckFunc { return func(s *terraform.State) error { rs, ok := s.RootModule().Resources[n] if !ok { @@ -105,10 +110,34 @@ func testAccCheckRoute53ZoneExists(n string) resource.TestCheckFunc { } conn := testAccProvider.Meta().(*AWSClient).r53conn - _, err := conn.GetHostedZone(&route53.GetHostedZoneRequest{ID: aws.String(rs.Primary.ID)}) + resp, err := conn.GetHostedZone(&route53.GetHostedZoneRequest{ID: aws.String(rs.Primary.ID)}) if err != nil { return fmt.Errorf("Hosted zone err: %v", err) } + *zone = *resp.HostedZone + return nil + } +} + +func testAccLoadTagsR53(zone *route53.HostedZone, td *route53.ResourceTagSet) resource.TestCheckFunc { + return func(s *terraform.State) error { + conn := testAccProvider.Meta().(*AWSClient).r53conn + + zone := cleanZoneID(*zone.ID) + req := &route53.ListTagsForResourceRequest{ + ResourceID: aws.String(zone), + ResourceType: aws.String("hostedzone"), + } + + resp, err := conn.ListTagsForResource(req) + if err != nil { + return err + } + + if resp.ResourceTagSet != nil { + *td = *resp.ResourceTagSet + } + return nil } } @@ -116,5 +145,10 @@ func testAccCheckRoute53ZoneExists(n string) resource.TestCheckFunc { const testAccRoute53ZoneConfig = ` resource "aws_route53_zone" "main" { name = "hashicorp.com" + + tags { + foo = "bar" + Name = "tf-route53-tag-test" + } } ` diff --git a/builtin/providers/aws/resource_aws_s3_bucket.go 
b/builtin/providers/aws/resource_aws_s3_bucket.go index cb32d5fa3..a33f9b35f 100644 --- a/builtin/providers/aws/resource_aws_s3_bucket.go +++ b/builtin/providers/aws/resource_aws_s3_bucket.go @@ -88,14 +88,12 @@ func resourceAwsS3BucketRead(d *schema.ResourceData, meta interface{}) error { return err } - resp, err := s3conn.GetBucketTagging(&s3.GetBucketTaggingRequest{ - Bucket: aws.String(d.Id()), - }) + tagSet, err := getTagSetS3(s3conn, d.Id()) if err != nil { return err } - if err := d.Set("tags", tagsToMapS3(resp.TagSet)); err != nil { + if err := d.Set("tags", tagsToMapS3(tagSet)); err != nil { return err } diff --git a/builtin/providers/aws/resource_aws_security_group_test.go b/builtin/providers/aws/resource_aws_security_group_test.go index b1e4e8c82..067cda8a1 100644 --- a/builtin/providers/aws/resource_aws_security_group_test.go +++ b/builtin/providers/aws/resource_aws_security_group_test.go @@ -30,15 +30,15 @@ func TestAccAWSSecurityGroup_normal(t *testing.T) { resource.TestCheckResourceAttr( "aws_security_group.web", "description", "Used in the terraform acceptance tests"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.protocol", "tcp"), + "aws_security_group.web", "ingress.3629188364.protocol", "tcp"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.from_port", "80"), + "aws_security_group.web", "ingress.3629188364.from_port", "80"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.to_port", "8000"), + "aws_security_group.web", "ingress.3629188364.to_port", "8000"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.cidr_blocks.#", "1"), + "aws_security_group.web", "ingress.3629188364.cidr_blocks.#", "1"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.cidr_blocks.0", "10.0.0.0/8"), + "aws_security_group.web", "ingress.3629188364.cidr_blocks.0", "10.0.0.0/8"), ), }, }, @@ -116,25 +116,25 @@ func 
TestAccAWSSecurityGroup_vpc(t *testing.T) { resource.TestCheckResourceAttr( "aws_security_group.web", "description", "Used in the terraform acceptance tests"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.protocol", "tcp"), + "aws_security_group.web", "ingress.3629188364.protocol", "tcp"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.from_port", "80"), + "aws_security_group.web", "ingress.3629188364.from_port", "80"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.to_port", "8000"), + "aws_security_group.web", "ingress.3629188364.to_port", "8000"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.cidr_blocks.#", "1"), + "aws_security_group.web", "ingress.3629188364.cidr_blocks.#", "1"), resource.TestCheckResourceAttr( - "aws_security_group.web", "ingress.332851786.cidr_blocks.0", "10.0.0.0/8"), + "aws_security_group.web", "ingress.3629188364.cidr_blocks.0", "10.0.0.0/8"), resource.TestCheckResourceAttr( - "aws_security_group.web", "egress.332851786.protocol", "tcp"), + "aws_security_group.web", "egress.3629188364.protocol", "tcp"), resource.TestCheckResourceAttr( - "aws_security_group.web", "egress.332851786.from_port", "80"), + "aws_security_group.web", "egress.3629188364.from_port", "80"), resource.TestCheckResourceAttr( - "aws_security_group.web", "egress.332851786.to_port", "8000"), + "aws_security_group.web", "egress.3629188364.to_port", "8000"), resource.TestCheckResourceAttr( - "aws_security_group.web", "egress.332851786.cidr_blocks.#", "1"), + "aws_security_group.web", "egress.3629188364.cidr_blocks.#", "1"), resource.TestCheckResourceAttr( - "aws_security_group.web", "egress.332851786.cidr_blocks.0", "10.0.0.0/8"), + "aws_security_group.web", "egress.3629188364.cidr_blocks.0", "10.0.0.0/8"), testCheck, ), }, diff --git a/builtin/providers/aws/s3_tags.go b/builtin/providers/aws/s3_tags.go index 43678952b..4b8234b9b 100644 --- 
a/builtin/providers/aws/s3_tags.go +++ b/builtin/providers/aws/s3_tags.go @@ -110,3 +110,22 @@ func tagsToMapS3(ts []s3.Tag) map[string]string { return result } + +// return a slice of s3 tags associated with the given s3 bucket. Essentially +// s3.GetBucketTagging, except returns an empty slice instead of an error when +// there are no tags. +func getTagSetS3(s3conn *s3.S3, bucket string) ([]s3.Tag, error) { + request := &s3.GetBucketTaggingRequest{ + Bucket: aws.String(bucket), + } + + response, err := s3conn.GetBucketTagging(request) + if ec2err, ok := err.(aws.APIError); ok && ec2err.Code == "NoSuchTagSet" { + // There is no tag set associated with the bucket. + return []s3.Tag{}, nil + } else if err != nil { + return nil, err + } + + return response.TagSet, nil +} diff --git a/builtin/providers/aws/tags_route53.go b/builtin/providers/aws/tags_route53.go new file mode 100644 index 000000000..e5251d02a --- /dev/null +++ b/builtin/providers/aws/tags_route53.go @@ -0,0 +1,86 @@ +package aws + +import ( + "log" + + "github.com/hashicorp/aws-sdk-go/aws" + "github.com/hashicorp/aws-sdk-go/gen/route53" + "github.com/hashicorp/terraform/helper/schema" +) + +// setTags is a helper to set the tags for a resource. 
It expects the +// tags field to be named "tags" +func setTagsR53(conn *route53.Route53, d *schema.ResourceData) error { + if d.HasChange("tags") { + oraw, nraw := d.GetChange("tags") + o := oraw.(map[string]interface{}) + n := nraw.(map[string]interface{}) + create, remove := diffTagsR53(tagsFromMapR53(o), tagsFromMapR53(n)) + + // Set tags + r := make([]string, len(remove)) + for i, t := range remove { + r[i] = *t.Key + } + log.Printf("[DEBUG] Changing tags: \n\tadding: %#v\n\tremoving:%#v", create, remove) + req := &route53.ChangeTagsForResourceRequest{ + AddTags: create, + RemoveTagKeys: r, + ResourceID: aws.String(d.Id()), + ResourceType: aws.String("hostedzone"), + } + + _, err := conn.ChangeTagsForResource(req) + if err != nil { + return err + } + } + + return nil +} + +// diffTags takes our tags locally and the ones remotely and returns +// the set of tags that must be created, and the set of tags that must +// be destroyed. +func diffTagsR53(oldTags, newTags []route53.Tag) ([]route53.Tag, []route53.Tag) { + // First, we're creating everything we have + create := make(map[string]interface{}) + for _, t := range newTags { + create[*t.Key] = *t.Value + } + + // Build the list of what to remove + var remove []route53.Tag + for _, t := range oldTags { + old, ok := create[*t.Key] + if !ok || old != *t.Value { + // Delete it! + remove = append(remove, t) + } + } + + return tagsFromMapR53(create), remove +} + +// tagsFromMap returns the tags for the given map of data. +func tagsFromMapR53(m map[string]interface{}) []route53.Tag { + result := make([]route53.Tag, 0, len(m)) + for k, v := range m { + result = append(result, route53.Tag{ + Key: aws.String(k), + Value: aws.String(v.(string)), + }) + } + + return result +} + +// tagsToMap turns the list of tags into a map. 
+func tagsToMapR53(ts []route53.Tag) map[string]string { + result := make(map[string]string) + for _, t := range ts { + result[*t.Key] = *t.Value + } + + return result +} diff --git a/builtin/providers/aws/tags_route53_test.go b/builtin/providers/aws/tags_route53_test.go new file mode 100644 index 000000000..40a4154f3 --- /dev/null +++ b/builtin/providers/aws/tags_route53_test.go @@ -0,0 +1,85 @@ +package aws + +import ( + "fmt" + "reflect" + "testing" + + "github.com/hashicorp/aws-sdk-go/gen/route53" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestDiffTagsR53(t *testing.T) { + cases := []struct { + Old, New map[string]interface{} + Create, Remove map[string]string + }{ + // Basic add/remove + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "bar": "baz", + }, + Create: map[string]string{ + "bar": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + + // Modify + { + Old: map[string]interface{}{ + "foo": "bar", + }, + New: map[string]interface{}{ + "foo": "baz", + }, + Create: map[string]string{ + "foo": "baz", + }, + Remove: map[string]string{ + "foo": "bar", + }, + }, + } + + for i, tc := range cases { + c, r := diffTagsR53(tagsFromMapR53(tc.Old), tagsFromMapR53(tc.New)) + cm := tagsToMapR53(c) + rm := tagsToMapR53(r) + if !reflect.DeepEqual(cm, tc.Create) { + t.Fatalf("%d: bad create: %#v", i, cm) + } + if !reflect.DeepEqual(rm, tc.Remove) { + t.Fatalf("%d: bad remove: %#v", i, rm) + } + } +} + +// testAccCheckTags can be used to check the tags on a resource. 
+func testAccCheckTagsR53( + ts *[]route53.Tag, key string, value string) resource.TestCheckFunc { + return func(s *terraform.State) error { + m := tagsToMapR53(*ts) + v, ok := m[key] + if value != "" && !ok { + return fmt.Errorf("Missing tag: %s", key) + } else if value == "" && ok { + return fmt.Errorf("Extra tag: %s", key) + } + if value == "" { + return nil + } + + if v != value { + return fmt.Errorf("%s: bad value: %s", key, v) + } + + return nil + } +} diff --git a/builtin/providers/docker/config.go b/builtin/providers/docker/config.go new file mode 100644 index 000000000..199182744 --- /dev/null +++ b/builtin/providers/docker/config.go @@ -0,0 +1,33 @@ +package docker + +import ( + "path/filepath" + + dc "github.com/fsouza/go-dockerclient" +) + +// Config is the structure that stores the configuration to talk to a +// Docker API compatible host. +type Config struct { + Host string + CertPath string +} + +// NewClient() returns a new Docker client. +func (c *Config) NewClient() (*dc.Client, error) { + // If there is no cert information, then just return the direct client + if c.CertPath == "" { + return dc.NewClient(c.Host) + } + + // If there is cert information, load it and use it. + ca := filepath.Join(c.CertPath, "ca.pem") + cert := filepath.Join(c.CertPath, "cert.pem") + key := filepath.Join(c.CertPath, "key.pem") + return dc.NewTLSClient(c.Host, cert, key, ca) +} + +// Data ia structure for holding data that we fetch from Docker. 
+type Data struct { + DockerImages map[string]*dc.APIImages +} diff --git a/builtin/providers/docker/provider.go b/builtin/providers/docker/provider.go new file mode 100644 index 000000000..2fe456e93 --- /dev/null +++ b/builtin/providers/docker/provider.go @@ -0,0 +1,54 @@ +package docker + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "host": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: schema.EnvDefaultFunc("DOCKER_HOST", "unix:/run/docker.sock"), + Description: "The Docker daemon address", + }, + + "cert_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: schema.EnvDefaultFunc("DOCKER_CERT_PATH", nil), + Description: "Path to directory with Docker TLS config", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "docker_container": resourceDockerContainer(), + "docker_image": resourceDockerImage(), + }, + + ConfigureFunc: providerConfigure, + } +} + +func providerConfigure(d *schema.ResourceData) (interface{}, error) { + config := Config{ + Host: d.Get("host").(string), + CertPath: d.Get("cert_path").(string), + } + + client, err := config.NewClient() + if err != nil { + return nil, fmt.Errorf("Error initializing Docker client: %s", err) + } + + err = client.Ping() + if err != nil { + return nil, fmt.Errorf("Error pinging Docker server: %s", err) + } + + return client, nil +} diff --git a/builtin/providers/docker/provider_test.go b/builtin/providers/docker/provider_test.go new file mode 100644 index 000000000..d09104889 --- /dev/null +++ b/builtin/providers/docker/provider_test.go @@ -0,0 +1,36 @@ +package docker + +import ( + "os/exec" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var testAccProviders map[string]terraform.ResourceProvider +var 
testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "docker": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + cmd := exec.Command("docker", "version") + if err := cmd.Run(); err != nil { + t.Fatalf("Docker must be available: %s", err) + } +} diff --git a/builtin/providers/docker/resource_docker_container.go b/builtin/providers/docker/resource_docker_container.go new file mode 100644 index 000000000..50b501ca2 --- /dev/null +++ b/builtin/providers/docker/resource_docker_container.go @@ -0,0 +1,222 @@ +package docker + +import ( + "bytes" + "fmt" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDockerContainer() *schema.Resource { + return &schema.Resource{ + Create: resourceDockerContainerCreate, + Read: resourceDockerContainerRead, + Update: resourceDockerContainerUpdate, + Delete: resourceDockerContainerDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + // Indicates whether the container must be running. + // + // An assumption is made that configured containers + // should be running; if not, they should not be in + // the configuration. Therefore a stopped container + // should be started. Set to false to have the + // provider leave the container alone. + // + // Actively-debugged containers are likely to be + // stopped and started manually, and Docker has + // some provisions for restarting containers that + // stop. 
The utility here comes from the fact that + // this will delete and re-create the container + // following the principle that the containers + // should be pristine when started. + "must_run": &schema.Schema{ + Type: schema.TypeBool, + Default: true, + Optional: true, + }, + + // ForceNew is not true for image because we need to + // sane this against Docker image IDs, as each image + // can have multiple names/tags attached do it. + "image": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "hostname": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "domainname": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "command": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + + "dns": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: stringSetHash, + }, + + "publish_all_ports": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + + "volumes": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: getVolumesElem(), + Set: resourceDockerVolumesHash, + }, + + "ports": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: getPortsElem(), + Set: resourceDockerPortsHash, + }, + + "env": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: stringSetHash, + }, + }, + } +} + +func getVolumesElem() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "from_container": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "container_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "host_path": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "read_only": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func getPortsElem() *schema.Resource { + return &schema.Resource{ + Schema: map[string]*schema.Schema{ + "internal": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + + "external": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: true, + }, + + "ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + + "protocol": &schema.Schema{ + Type: schema.TypeString, + Default: "tcp", + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceDockerPortsHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + buf.WriteString(fmt.Sprintf("%v-", m["internal"].(int))) + + if v, ok := m["external"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(int))) + } + + if v, ok := m["ip"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(string))) + } + + if v, ok := m["protocol"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(string))) + } + + return hashcode.String(buf.String()) +} + +func resourceDockerVolumesHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + + if v, ok := m["from_container"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(string))) + } + + if v, ok := m["container_path"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(string))) + } + + if v, ok := m["host_path"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(string))) + } + + if v, ok := m["read_only"]; ok { + buf.WriteString(fmt.Sprintf("%v-", v.(bool))) + } + + return hashcode.String(buf.String()) +} + +func stringSetHash(v interface{}) int { + return hashcode.String(v.(string)) +} diff --git a/builtin/providers/docker/resource_docker_container_funcs.go b/builtin/providers/docker/resource_docker_container_funcs.go new file mode 100644 index 000000000..17a8e4eed --- /dev/null +++ 
b/builtin/providers/docker/resource_docker_container_funcs.go @@ -0,0 +1,267 @@ +package docker + +import ( + "errors" + "fmt" + "strconv" + "strings" + + dc "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error { + var err error + client := meta.(*dc.Client) + + var data Data + if err := fetchLocalImages(&data, client); err != nil { + return err + } + + image := d.Get("image").(string) + if _, ok := data.DockerImages[image]; !ok { + if _, ok := data.DockerImages[image+":latest"]; !ok { + return fmt.Errorf("Unable to find image %s", image) + } else { + image = image + ":latest" + } + } + + // The awesome, wonderful, splendiferous, sensical + // Docker API now lets you specify a HostConfig in + // CreateContainerOptions, but in my testing it still only + // actually applies HostConfig options set in StartContainer. + // How cool is that? + createOpts := dc.CreateContainerOptions{ + Name: d.Get("name").(string), + Config: &dc.Config{ + Image: image, + Hostname: d.Get("hostname").(string), + Domainname: d.Get("domainname").(string), + }, + } + + if v, ok := d.GetOk("env"); ok { + createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set)) + } + + if v, ok := d.GetOk("command"); ok { + createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{})) + } + + exposedPorts := map[dc.Port]struct{}{} + portBindings := map[dc.Port][]dc.PortBinding{} + + if v, ok := d.GetOk("ports"); ok { + exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set)) + } + if len(exposedPorts) != 0 { + createOpts.Config.ExposedPorts = exposedPorts + } + + volumes := map[string]struct{}{} + binds := []string{} + volumesFrom := []string{} + + if v, ok := d.GetOk("volumes"); ok { + volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set)) + if err != nil { + return fmt.Errorf("Unable to parse volumes: %s", err) + } + } + if len(volumes) != 0 { + 
createOpts.Config.Volumes = volumes + } + + var retContainer *dc.Container + if retContainer, err = client.CreateContainer(createOpts); err != nil { + return fmt.Errorf("Unable to create container: %s", err) + } + if retContainer == nil { + return fmt.Errorf("Returned container is nil") + } + + d.SetId(retContainer.ID) + + hostConfig := &dc.HostConfig{ + PublishAllPorts: d.Get("publish_all_ports").(bool), + } + + if len(portBindings) != 0 { + hostConfig.PortBindings = portBindings + } + + if len(binds) != 0 { + hostConfig.Binds = binds + } + if len(volumesFrom) != 0 { + hostConfig.VolumesFrom = volumesFrom + } + + if v, ok := d.GetOk("dns"); ok { + hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set)) + } + + if err := client.StartContainer(retContainer.ID, hostConfig); err != nil { + return fmt.Errorf("Unable to start container: %s", err) + } + + return resourceDockerContainerRead(d, meta) +} + +func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dc.Client) + + apiContainer, err := fetchDockerContainer(d.Get("name").(string), client) + if err != nil { + return err + } + + if apiContainer == nil { + // This container doesn't exist anymore + d.SetId("") + + return nil + } + + container, err := client.InspectContainer(apiContainer.ID) + if err != nil { + return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err) + } + + if d.Get("must_run").(bool) && !container.State.Running { + return resourceDockerContainerDelete(d, meta) + } + + return nil +} + +func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dc.Client) + + removeOpts := dc.RemoveContainerOptions{ + ID: d.Id(), + RemoveVolumes: true, + Force: true, + } + + if err := client.RemoveContainer(removeOpts); err != nil { + return fmt.Errorf("Error deleting container %s: %s", d.Id(), err) + } + 
+ d.SetId("") + return nil +} + +func stringListToStringSlice(stringList []interface{}) []string { + ret := []string{} + for _, v := range stringList { + ret = append(ret, v.(string)) + } + return ret +} + +func stringSetToStringSlice(stringSet *schema.Set) []string { + ret := []string{} + if stringSet == nil { + return ret + } + for _, envVal := range stringSet.List() { + ret = append(ret, envVal.(string)) + } + return ret +} + +func fetchDockerContainer(name string, client *dc.Client) (*dc.APIContainers, error) { + apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true}) + + if err != nil { + return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err) + } + + for _, apiContainer := range apiContainers { + // Sometimes the Docker API prefixes container names with / + // like it does in these commands. But if there's no + // set name, it just uses the ID without a /...ugh. + var dockerContainerName string + if len(apiContainer.Names) > 0 { + dockerContainerName = strings.TrimLeft(apiContainer.Names[0], "/") + } else { + dockerContainerName = apiContainer.ID + } + + if dockerContainerName == name { + return &apiContainer, nil + } + } + + return nil, nil +} + +func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) { + retExposedPorts := map[dc.Port]struct{}{} + retPortBindings := map[dc.Port][]dc.PortBinding{} + + for _, portInt := range ports.List() { + port := portInt.(map[string]interface{}) + internal := port["internal"].(int) + protocol := port["protocol"].(string) + + exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol) + retExposedPorts[exposedPort] = struct{}{} + + external, extOk := port["external"].(int) + ip, ipOk := port["ip"].(string) + + if extOk { + portBinding := dc.PortBinding{ + HostPort: strconv.Itoa(external), + } + if ipOk { + portBinding.HostIP = ip + } + retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding) + } + } 
+ + return retExposedPorts, retPortBindings +} + +func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) { + retVolumeMap := map[string]struct{}{} + retHostConfigBinds := []string{} + retVolumeFromContainers := []string{} + + for _, volumeInt := range volumes.List() { + volume := volumeInt.(map[string]interface{}) + fromContainer := volume["from_container"].(string) + containerPath := volume["container_path"].(string) + hostPath := volume["host_path"].(string) + readOnly := volume["read_only"].(bool) + + switch { + case len(fromContainer) == 0 && len(containerPath) == 0: + return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container") + case len(fromContainer) != 0 && len(containerPath) != 0: + return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry") + case len(fromContainer) != 0: + retVolumeFromContainers = append(retVolumeFromContainers, fromContainer) + case len(hostPath) != 0: + readWrite := "rw" + if readOnly { + readWrite = "ro" + } + retVolumeMap[containerPath] = struct{}{} + retHostConfigBinds = append(retHostConfigBinds, hostPath+":"+containerPath+":"+readWrite) + default: + retVolumeMap[containerPath] = struct{}{} + } + } + + return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil +} diff --git a/builtin/providers/docker/resource_docker_container_test.go b/builtin/providers/docker/resource_docker_container_test.go new file mode 100644 index 000000000..48302d096 --- /dev/null +++ b/builtin/providers/docker/resource_docker_container_test.go @@ -0,0 +1,63 @@ +package docker + +import ( + "fmt" + "testing" + + dc "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" +) + +func TestAccDockerContainer_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: 
func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDockerContainerConfig, + Check: resource.ComposeTestCheckFunc( + testAccContainerRunning("docker_container.foo"), + ), + }, + }, + }) +} + +func testAccContainerRunning(n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + client := testAccProvider.Meta().(*dc.Client) + containers, err := client.ListContainers(dc.ListContainersOptions{}) + if err != nil { + return err + } + + for _, c := range containers { + if c.ID == rs.Primary.ID { + return nil + } + } + + return fmt.Errorf("Container not found: %s", rs.Primary.ID) + } +} + +const testAccDockerContainerConfig = ` +resource "docker_image" "foo" { + name = "ubuntu:trusty-20150320" +} + +resource "docker_container" "foo" { + name = "tf-test" + image = "${docker_image.foo.latest}" +} +` diff --git a/builtin/providers/docker/resource_docker_image.go b/builtin/providers/docker/resource_docker_image.go new file mode 100644 index 000000000..54822d738 --- /dev/null +++ b/builtin/providers/docker/resource_docker_image.go @@ -0,0 +1,31 @@ +package docker + +import ( + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDockerImage() *schema.Resource { + return &schema.Resource{ + Create: resourceDockerImageCreate, + Read: resourceDockerImageRead, + Update: resourceDockerImageUpdate, + Delete: resourceDockerImageDelete, + + Schema: map[string]*schema.Schema{ + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + + "keep_updated": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + }, + + "latest": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} diff --git a/builtin/providers/docker/resource_docker_image_funcs.go 
b/builtin/providers/docker/resource_docker_image_funcs.go new file mode 100644 index 000000000..2c7470db0 --- /dev/null +++ b/builtin/providers/docker/resource_docker_image_funcs.go @@ -0,0 +1,173 @@ +package docker + +import ( + "fmt" + "strings" + + dc "github.com/fsouza/go-dockerclient" + "github.com/hashicorp/terraform/helper/schema" +) + +func resourceDockerImageCreate(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dc.Client) + apiImage, err := findImage(d, client) + if err != nil { + return fmt.Errorf("Unable to read Docker image into resource: %s", err) + } + + d.SetId(apiImage.ID + d.Get("name").(string)) + d.Set("latest", apiImage.ID) + + return nil +} + +func resourceDockerImageRead(d *schema.ResourceData, meta interface{}) error { + client := meta.(*dc.Client) + apiImage, err := findImage(d, client) + if err != nil { + return fmt.Errorf("Unable to read Docker image into resource: %s", err) + } + + d.Set("latest", apiImage.ID) + + return nil +} + +func resourceDockerImageUpdate(d *schema.ResourceData, meta interface{}) error { + // We need to re-read in case switching parameters affects + // the value of "latest" or others + + return resourceDockerImageRead(d, meta) +} + +func resourceDockerImageDelete(d *schema.ResourceData, meta interface{}) error { + d.SetId("") + return nil +} + +func fetchLocalImages(data *Data, client *dc.Client) error { + images, err := client.ListImages(dc.ListImagesOptions{All: false}) + if err != nil { + return fmt.Errorf("Unable to list Docker images: %s", err) + } + + if data.DockerImages == nil { + data.DockerImages = make(map[string]*dc.APIImages) + } + + // Docker uses different nomenclatures in different places...sometimes a short + // ID, sometimes long, etc. So we store both in the map so we can always find + // the same image object. We store the tags, too. 
+ for i, image := range images { + data.DockerImages[image.ID[:12]] = &images[i] + data.DockerImages[image.ID] = &images[i] + for _, repotag := range image.RepoTags { + data.DockerImages[repotag] = &images[i] + } + } + + return nil +} + +func pullImage(data *Data, client *dc.Client, image string) error { + // TODO: Test local registry handling. It should be working + // based on the code that was ported over + + pullOpts := dc.PullImageOptions{} + + splitImageName := strings.Split(image, ":") + switch { + + // It's in registry:port/repo:tag format + case len(splitImageName) == 3: + splitPortRepo := strings.Split(splitImageName[1], "/") + pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] + pullOpts.Repository = splitPortRepo[1] + pullOpts.Tag = splitImageName[2] + + // It's either registry:port/repo or repo:tag with default registry + case len(splitImageName) == 2: + splitPortRepo := strings.Split(splitImageName[1], "/") + switch len(splitPortRepo) { + + // registry:port/repo + case 2: + pullOpts.Registry = splitImageName[0] + ":" + splitPortRepo[0] + pullOpts.Repository = splitPortRepo[1] + pullOpts.Tag = "latest" + + // repo:tag + case 1: + pullOpts.Repository = splitImageName[0] + pullOpts.Tag = splitImageName[1] + } + + default: + pullOpts.Repository = image + } + + if err := client.PullImage(pullOpts, dc.AuthConfiguration{}); err != nil { + return fmt.Errorf("Error pulling image %s: %s\n", image, err) + } + + return fetchLocalImages(data, client) +} + +func getImageTag(image string) string { + splitImageName := strings.Split(image, ":") + switch { + + // It's in registry:port/repo:tag format + case len(splitImageName) == 3: + return splitImageName[2] + + // It's either registry:port/repo or repo:tag with default registry + case len(splitImageName) == 2: + splitPortRepo := strings.Split(splitImageName[1], "/") + if len(splitPortRepo) == 2 { + return "" + } else { + return splitImageName[1] + } + } + + return "" +} + +func findImage(d 
*schema.ResourceData, client *dc.Client) (*dc.APIImages, error) { + var data Data + if err := fetchLocalImages(&data, client); err != nil { + return nil, err + } + + imageName := d.Get("name").(string) + if imageName == "" { + return nil, fmt.Errorf("Empty image name is not allowed") + } + + searchLocal := func() *dc.APIImages { + if apiImage, ok := data.DockerImages[imageName]; ok { + return apiImage + } + if apiImage, ok := data.DockerImages[imageName+":latest"]; ok { + imageName = imageName + ":latest" + return apiImage + } + return nil + } + + foundImage := searchLocal() + + if d.Get("keep_updated").(bool) || foundImage == nil { + if err := pullImage(&data, client, imageName); err != nil { + return nil, fmt.Errorf("Unable to pull image %s: %s", imageName, err) + } + } + + foundImage = searchLocal() + if foundImage != nil { + return foundImage, nil + } + + return nil, fmt.Errorf("Unable to find or pull image %s", imageName) +} diff --git a/builtin/providers/docker/resource_docker_image_test.go b/builtin/providers/docker/resource_docker_image_test.go new file mode 100644 index 000000000..d43c81efc --- /dev/null +++ b/builtin/providers/docker/resource_docker_image_test.go @@ -0,0 +1,32 @@ +package docker + +import ( + "testing" + + "github.com/hashicorp/terraform/helper/resource" +) + +func TestAccDockerImage_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccDockerImageConfig, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr( + "docker_image.foo", + "latest", + "d0955f21bf24f5bfffd32d2d0bb669d0564701c271bc3dfc64cfc5adfdec2d07"), + ), + }, + }, + }) +} + +const testAccDockerImageConfig = ` +resource "docker_image" "foo" { + name = "ubuntu:trusty-20150320" + keep_updated = true +} +` diff --git a/builtin/providers/openstack/config.go b/builtin/providers/openstack/config.go new file mode 100644 
index 000000000..d05662017 --- /dev/null +++ b/builtin/providers/openstack/config.go @@ -0,0 +1,67 @@ +package openstack + +import ( + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack" +) + +type Config struct { + Username string + UserID string + Password string + APIKey string + IdentityEndpoint string + TenantID string + TenantName string + DomainID string + DomainName string + + osClient *gophercloud.ProviderClient +} + +func (c *Config) loadAndValidate() error { + ao := gophercloud.AuthOptions{ + Username: c.Username, + UserID: c.UserID, + Password: c.Password, + APIKey: c.APIKey, + IdentityEndpoint: c.IdentityEndpoint, + TenantID: c.TenantID, + TenantName: c.TenantName, + DomainID: c.DomainID, + DomainName: c.DomainName, + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return err + } + + c.osClient = client + + return nil +} + +func (c *Config) blockStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewBlockStorageV1(c.osClient, gophercloud.EndpointOpts{ + Region: region, + }) +} + +func (c *Config) computeV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewComputeV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + }) +} + +func (c *Config) networkingV2Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewNetworkV2(c.osClient, gophercloud.EndpointOpts{ + Region: region, + }) +} + +func (c *Config) objectStorageV1Client(region string) (*gophercloud.ServiceClient, error) { + return openstack.NewObjectStorageV1(c.osClient, gophercloud.EndpointOpts{ + Region: region, + }) +} diff --git a/builtin/providers/openstack/provider.go b/builtin/providers/openstack/provider.go new file mode 100644 index 000000000..a43242333 --- /dev/null +++ b/builtin/providers/openstack/provider.go @@ -0,0 +1,113 @@ +package openstack + +import ( + "os" + + "github.com/hashicorp/terraform/helper/schema" + 
"github.com/hashicorp/terraform/terraform" +) + +// Provider returns a schema.Provider for OpenStack. +func Provider() terraform.ResourceProvider { + return &schema.Provider{ + Schema: map[string]*schema.Schema{ + "auth_url": &schema.Schema{ + Type: schema.TypeString, + Required: true, + DefaultFunc: envDefaultFunc("OS_AUTH_URL"), + }, + "user_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: envDefaultFunc("OS_USERNAME"), + }, + "user_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "tenant_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: envDefaultFunc("OS_TENANT_NAME"), + }, + "password": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + DefaultFunc: envDefaultFunc("OS_PASSWORD"), + }, + "api_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "domain_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + "domain_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Default: "", + }, + }, + + ResourcesMap: map[string]*schema.Resource{ + "openstack_blockstorage_volume_v1": resourceBlockStorageVolumeV1(), + "openstack_compute_instance_v2": resourceComputeInstanceV2(), + "openstack_compute_keypair_v2": resourceComputeKeypairV2(), + "openstack_compute_secgroup_v2": resourceComputeSecGroupV2(), + "openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(), + "openstack_fw_firewall_v1": resourceFWFirewallV1(), + "openstack_fw_policy_v1": resourceFWPolicyV1(), + "openstack_fw_rule_v1": resourceFWRuleV1(), + "openstack_lb_monitor_v1": resourceLBMonitorV1(), + "openstack_lb_pool_v1": resourceLBPoolV1(), + "openstack_lb_vip_v1": resourceLBVipV1(), + "openstack_networking_network_v2": resourceNetworkingNetworkV2(), + "openstack_networking_subnet_v2": 
resourceNetworkingSubnetV2(), + "openstack_networking_floatingip_v2": resourceNetworkingFloatingIPV2(), + "openstack_networking_router_v2": resourceNetworkingRouterV2(), + "openstack_networking_router_interface_v2": resourceNetworkingRouterInterfaceV2(), + "openstack_objectstorage_container_v1": resourceObjectStorageContainerV1(), + }, + + ConfigureFunc: configureProvider, + } +} + +func configureProvider(d *schema.ResourceData) (interface{}, error) { + config := Config{ + IdentityEndpoint: d.Get("auth_url").(string), + Username: d.Get("user_name").(string), + UserID: d.Get("user_id").(string), + Password: d.Get("password").(string), + APIKey: d.Get("api_key").(string), + TenantID: d.Get("tenant_id").(string), + TenantName: d.Get("tenant_name").(string), + DomainID: d.Get("domain_id").(string), + DomainName: d.Get("domain_name").(string), + } + + if err := config.loadAndValidate(); err != nil { + return nil, err + } + + return &config, nil +} + +func envDefaultFunc(k string) schema.SchemaDefaultFunc { + return func() (interface{}, error) { + if v := os.Getenv(k); v != "" { + return v, nil + } + + return nil, nil + } +} diff --git a/builtin/providers/openstack/provider_test.go b/builtin/providers/openstack/provider_test.go new file mode 100644 index 000000000..7b3e65dd4 --- /dev/null +++ b/builtin/providers/openstack/provider_test.go @@ -0,0 +1,66 @@ +package openstack + +import ( + "os" + "testing" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/hashicorp/terraform/terraform" +) + +var ( + OS_REGION_NAME = "" + OS_POOL_NAME = "" +) + +var testAccProviders map[string]terraform.ResourceProvider +var testAccProvider *schema.Provider + +func init() { + testAccProvider = Provider().(*schema.Provider) + testAccProviders = map[string]terraform.ResourceProvider{ + "openstack": testAccProvider, + } +} + +func TestProvider(t *testing.T) { + if err := Provider().(*schema.Provider).InternalValidate(); err != nil { + t.Fatalf("err: %s", err) + } +} + +func 
TestProvider_impl(t *testing.T) { + var _ terraform.ResourceProvider = Provider() +} + +func testAccPreCheck(t *testing.T) { + v := os.Getenv("OS_AUTH_URL") + if v == "" { + t.Fatal("OS_AUTH_URL must be set for acceptance tests") + } + + v = os.Getenv("OS_REGION_NAME") + if v == "" { + t.Fatal("OS_REGION_NAME must be set for acceptance tests") + } + OS_REGION_NAME = v + + v1 := os.Getenv("OS_IMAGE_ID") + v2 := os.Getenv("OS_IMAGE_NAME") + + if v1 == "" && v2 == "" { + t.Fatal("OS_IMAGE_ID or OS_IMAGE_NAME must be set for acceptance tests") + } + + v = os.Getenv("OS_POOL_NAME") + if v == "" { + t.Fatal("OS_POOL_NAME must be set for acceptance tests") + } + OS_POOL_NAME = v + + v1 = os.Getenv("OS_FLAVOR_ID") + v2 = os.Getenv("OS_FLAVOR_NAME") + if v1 == "" && v2 == "" { + t.Fatal("OS_FLAVOR_ID or OS_FLAVOR_NAME must be set for acceptance tests") + } +} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go new file mode 100644 index 000000000..dc2638590 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1.go @@ -0,0 +1,314 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" +) + +func resourceBlockStorageVolumeV1() *schema.Resource { + return &schema.Resource{ + Create: resourceBlockStorageVolumeV1Create, + Read: resourceBlockStorageVolumeV1Read, + Update: resourceBlockStorageVolumeV1Update, + Delete: resourceBlockStorageVolumeV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + 
DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "size": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "snapshot_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "source_vol_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "volume_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "attachment": &schema.Schema{ + Type: schema.TypeSet, + Computed: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + }, + Set: resourceVolumeAttachmentHash, + }, + }, + } +} + +func resourceBlockStorageVolumeV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + createOpts := &volumes.CreateOpts{ + Description: d.Get("description").(string), + Name: d.Get("name").(string), + Size: d.Get("size").(int), + SnapshotID: d.Get("snapshot_id").(string), + SourceVolID: d.Get("source_vol_id").(string), + ImageID: d.Get("image_id").(string), + VolumeType: d.Get("volume_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", 
createOpts) + v, err := volumes.Create(blockStorageClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack volume: %s", err) + } + log.Printf("[INFO] Volume ID: %s", v.ID) + + // Store the ID now + d.SetId(v.ID) + + // Wait for the volume to become available. + log.Printf( + "[DEBUG] Waiting for volume (%s) to become available", + v.ID) + + stateConf := &resource.StateChangeConf{ + Target: "available", + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, v.ID), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become ready: %s", + v.ID, err) + } + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + log.Printf("[DEBUG] Retrieved volume %s: %+v", d.Id(), v) + + d.Set("size", v.Size) + d.Set("description", v.Description) + d.Set("name", v.Name) + d.Set("snapshot_id", v.SnapshotID) + d.Set("source_vol_id", v.SourceVolID) + d.Set("volume_type", v.VolumeType) + d.Set("metadata", v.Metadata) + + if len(v.Attachments) > 0 { + attachments := make([]map[string]interface{}, len(v.Attachments)) + for i, attachment := range v.Attachments { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = attachment["id"] + attachments[i]["instance_id"] = attachment["server_id"] + attachments[i]["device"] = attachment["device"] + log.Printf("[DEBUG] attachment: %v", attachment) + } + d.Set("attachment", attachments) + } + + return nil +} + +func 
resourceBlockStorageVolumeV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + updateOpts := volumes.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceVolumeMetadataV1(d) + } + + _, err = volumes.Update(blockStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack volume: %s", err) + } + + return resourceBlockStorageVolumeV1Read(d, meta) +} + +func resourceBlockStorageVolumeV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + blockStorageClient, err := config.blockStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + v, err := volumes.Get(blockStorageClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "volume") + } + + // make sure this volume is detached from all instances before deleting + if len(v.Attachments) > 0 { + log.Printf("[DEBUG] detaching volumes") + if computeClient, err := config.computeV2Client(d.Get("region").(string)); err != nil { + return err + } else { + for _, volumeAttachment := range v.Attachments { + log.Printf("[DEBUG] Attachment: %v", volumeAttachment) + if err := volumeattach.Delete(computeClient, volumeAttachment["server_id"].(string), volumeAttachment["id"].(string)).ExtractErr(); err != nil { + return err + } + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"in-use", "attaching"}, + Target: "available", + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + 
if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to become available: %s", + d.Id(), err) + } + } + } + + err = volumes.Delete(blockStorageClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack volume: %s", err) + } + + // Wait for the volume to delete before moving on. + log.Printf("[DEBUG] Waiting for volume (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"deleting", "available"}, + Target: "deleted", + Refresh: VolumeV1StateRefreshFunc(blockStorageClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for volume (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +func resourceVolumeMetadataV1(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +// VolumeV1StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack volume. 
+func VolumeV1StateRefreshFunc(client *gophercloud.ServiceClient, volumeID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + v, err := volumes.Get(client, volumeID).Extract() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return nil, "", err + } + if errCode.Actual == 404 { + return v, "deleted", nil + } + return nil, "", err + } + + return v, v.Status, nil + } +} + +func resourceVolumeAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + if m["instance_id"] != nil { + buf.WriteString(fmt.Sprintf("%s-", m["instance_id"].(string))) + } + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go new file mode 100644 index 000000000..5404fd391 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_blockstorage_volume_v1_test.go @@ -0,0 +1,138 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" +) + +func TestAccBlockStorageV1Volume_basic(t *testing.T) { + var volume volumes.Volume + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckBlockStorageV1VolumeDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccBlockStorageV1Volume_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.volume_1", &volume), + resource.TestCheckResourceAttr("openstack_blockstorage_volume_v1.volume_1", "name", "tf-test-volume"), + testAccCheckBlockStorageV1VolumeMetadata(&volume, "foo", "bar"), + ), + }, + resource.TestStep{ + Config: 
testAccBlockStorageV1Volume_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_blockstorage_volume_v1.volume_1", "name", "tf-test-volume-updated"), + testAccCheckBlockStorageV1VolumeMetadata(&volume, "foo", "bar"), + ), + }, + }, + }) +} + +func testAccCheckBlockStorageV1VolumeDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_blockstorage_volume_v1" { + continue + } + + _, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Volume still exists") + } + } + + return nil +} + +func testAccCheckBlockStorageV1VolumeExists(t *testing.T, n string, volume *volumes.Volume) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + blockStorageClient, err := config.blockStorageV1Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } + + found, err := volumes.Get(blockStorageClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Volume not found") + } + + *volume = *found + + return nil + } +} + +func testAccCheckBlockStorageV1VolumeMetadata( + volume *volumes.Volume, k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if volume.Metadata == nil { + return fmt.Errorf("No metadata") + } + + for key, value := range volume.Metadata { + if k != key { + continue + } + + if v == value { + return nil + } + + return fmt.Errorf("Bad 
value for %s: %s", k, value) + } + + return fmt.Errorf("Metadata not found: %s", k) + } +} + +var testAccBlockStorageV1Volume_basic = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "volume_1" { + region = "%s" + name = "tf-test-volume" + description = "first test volume" + metadata{ + foo = "bar" + } + size = 1 + }`, + OS_REGION_NAME) + +var testAccBlockStorageV1Volume_update = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "volume_1" { + region = "%s" + name = "tf-test-volume-updated" + description = "first test volume" + metadata{ + foo = "bar" + } + size = 1 + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go new file mode 100644 index 000000000..bb2facc4e --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2.go @@ -0,0 +1,107 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" +) + +func resourceComputeFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeFloatingIPV2Create, + Read: resourceComputeFloatingIPV2Read, + Update: nil, + Delete: resourceComputeFloatingIPV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_POOL_NAME"), + }, + + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + + "instance_id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + }, + } +} + +func resourceComputeFloatingIPV2Create(d *schema.ResourceData, 
meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := &floatingip.CreateOpts{ + Pool: d.Get("pool").(string), + } + log.Printf("[DEBUG] Create Options: %#v", createOpts) + newFip, err := floatingip.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating Floating IP: %s", err) + } + + d.SetId(newFip.ID) + + return resourceComputeFloatingIPV2Read(d, meta) +} + +func resourceComputeFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + fip, err := floatingip.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "floating ip") + } + + log.Printf("[DEBUG] Retrieved Floating IP %s: %+v", d.Id(), fip) + + d.Set("pool", fip.Pool) + d.Set("instance_id", fip.InstanceID) + d.Set("address", fip.IP) + d.Set("fixed_ip", fip.FixedIP) + + return nil +} + +func resourceComputeFloatingIPV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + log.Printf("[DEBUG] Deleting Floating IP %s", d.Id()) + if err := floatingip.Delete(computeClient, d.Id()).ExtractErr(); err != nil { + return fmt.Errorf("Error deleting Floating IP: %s", err) + } + + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go new file mode 100644 index 000000000..a298a87d1 --- /dev/null +++ 
b/builtin/providers/openstack/resource_openstack_compute_floatingip_v2_test.go @@ -0,0 +1,91 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip" +) + +func TestAccComputeV2FloatingIP_basic(t *testing.T) { + var floatingIP floatingip.FloatingIP + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2FloatingIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2FloatingIP_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeV2FloatingIPExists(t, "openstack_compute_floatingip_v2.foo", &floatingIP), + ), + }, + }, + }) +} + +func testAccCheckComputeV2FloatingIPDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2FloatingIPDestroy) Error creating OpenStack compute client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_compute_floatingip_v2" { + continue + } + + _, err := floatingip.Get(computeClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("FloatingIP still exists") + } + } + + return nil +} + +func testAccCheckComputeV2FloatingIPExists(t *testing.T, n string, kp *floatingip.FloatingIP) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2FloatingIPExists) Error creating OpenStack compute 
client: %s", err) + } + + found, err := floatingip.Get(computeClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("FloatingIP not found") + } + + *kp = *found + + return nil + } +} + +var testAccComputeV2FloatingIP_basic = ` + resource "openstack_compute_floatingip_v2" "foo" { + } + + resource "openstack_compute_instance_v2" "bar" { + name = "terraform-acc-floating-ip-test" + floating_ip = "${openstack_compute_floatingip_v2.foo.address}" + }` diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go new file mode 100644 index 000000000..b5fe36a10 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2.go @@ -0,0 +1,1007 @@ +package openstack + +import ( + "bytes" + "crypto/sha1" + "encoding/hex" + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/rackspace/gophercloud/openstack/compute/v2/flavors" + "github.com/rackspace/gophercloud/openstack/compute/v2/images" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" + "github.com/rackspace/gophercloud/pagination" +) + +func resourceComputeInstanceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeInstanceV2Create, + Read: 
resourceComputeInstanceV2Read, + Update: resourceComputeInstanceV2Update, + Delete: resourceComputeInstanceV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "image_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DefaultFunc: envDefaultFunc("OS_IMAGE_ID"), + }, + "image_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + Computed: true, + DefaultFunc: envDefaultFunc("OS_IMAGE_NAME"), + }, + "flavor_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + DefaultFunc: envDefaultFunc("OS_FLAVOR_ID"), + }, + "flavor_name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + Computed: true, + DefaultFunc: envDefaultFunc("OS_FLAVOR_NAME"), + }, + "floating_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "user_data": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + // just stash the hash for state & diff comparisons + StateFunc: func(v interface{}) string { + switch v.(type) { + case string: + hash := sha1.Sum([]byte(v.(string))) + return hex.EncodeToString(hash[:]) + default: + return "" + } + }, + }, + "security_groups": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + }, + "availability_zone": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "network": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uuid": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "port": 
&schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "fixed_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + }, + }, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "config_drive": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + ForceNew: true, + }, + "admin_pass": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "access_ip_v4": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "access_ip_v6": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + Optional: true, + ForceNew: false, + }, + "key_pair": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "block_device": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "uuid": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "source_type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "volume_size": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + "destination_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "boot_index": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + }, + }, + }, + }, + "volume": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + "volume_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "device": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + Computed: true, + }, + }, + }, + Set: resourceComputeVolumeAttachmentHash, + }, + }, + } +} + +func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := 
config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var createOpts servers.CreateOptsBuilder + + imageId, err := getImageID(computeClient, d) + if err != nil { + return err + } + + flavorId, err := getFlavorID(computeClient, d) + if err != nil { + return err + } + + createOpts = &servers.CreateOpts{ + Name: d.Get("name").(string), + ImageRef: imageId, + FlavorRef: flavorId, + SecurityGroups: resourceInstanceSecGroupsV2(d), + AvailabilityZone: d.Get("availability_zone").(string), + Networks: resourceInstanceNetworksV2(d), + Metadata: resourceInstanceMetadataV2(d), + ConfigDrive: d.Get("config_drive").(bool), + AdminPass: d.Get("admin_pass").(string), + UserData: []byte(d.Get("user_data").(string)), + } + + if keyName, ok := d.Get("key_pair").(string); ok && keyName != "" { + createOpts = &keypairs.CreateOptsExt{ + createOpts, + keyName, + } + } + + if blockDeviceRaw, ok := d.Get("block_device").(map[string]interface{}); ok && blockDeviceRaw != nil { + blockDevice := resourceInstanceBlockDeviceV2(d, blockDeviceRaw) + createOpts = &bootfromvolume.CreateOptsExt{ + createOpts, + blockDevice, + } + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack server: %s", err) + } + log.Printf("[INFO] Instance ID: %s", server.ID) + + // Store the ID now + d.SetId(server.ID) + + // Wait for the instance to become running so we can get some attributes + // that aren't available until later. 
+ log.Printf( + "[DEBUG] Waiting for instance (%s) to become running", + server.ID) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"BUILD"}, + Target: "ACTIVE", + Refresh: ServerV2StateRefreshFunc(computeClient, server.ID), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to become ready: %s", + server.ID, err) + } + floatingIP := d.Get("floating_ip").(string) + if floatingIP != "" { + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + allFloatingIPs, err := getFloatingIPs(networkingClient) + if err != nil { + return fmt.Errorf("Error listing OpenStack floating IPs: %s", err) + } + err = assignFloatingIP(networkingClient, extractFloatingIPFromIP(allFloatingIPs, floatingIP), server.ID) + if err != nil { + return fmt.Errorf("Error assigning floating IP to OpenStack compute instance: %s", err) + } + } + + // were volume attachments specified? 
+ if v := d.Get("volume"); v != nil { + vols := v.(*schema.Set).List() + if len(vols) > 0 { + if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { + return fmt.Errorf("Error creating OpenStack block storage client: %s", err) + } else { + if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), vols); err != nil { + return err + } + } + } + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + server, err := servers.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "server") + } + + log.Printf("[DEBUG] Retreived Server %s: %+v", d.Id(), server) + + d.Set("name", server.Name) + d.Set("access_ip_v4", server.AccessIPv4) + d.Set("access_ip_v6", server.AccessIPv6) + + hostv4 := server.AccessIPv4 + if hostv4 == "" { + if publicAddressesRaw, ok := server.Addresses["public"]; ok { + publicAddresses := publicAddressesRaw.([]interface{}) + for _, paRaw := range publicAddresses { + pa := paRaw.(map[string]interface{}) + if pa["version"].(float64) == 4 { + hostv4 = pa["addr"].(string) + break + } + } + } + } + + // If no host found, just get the first IPv4 we find + if hostv4 == "" { + for _, networkAddresses := range server.Addresses { + for _, element := range networkAddresses.([]interface{}) { + address := element.(map[string]interface{}) + if address["version"].(float64) == 4 { + hostv4 = address["addr"].(string) + break + } + } + } + } + d.Set("access_ip_v4", hostv4) + log.Printf("hostv4: %s", hostv4) + + hostv6 := server.AccessIPv6 + if hostv6 == "" { + if publicAddressesRaw, ok := server.Addresses["public"]; ok { + publicAddresses := publicAddressesRaw.([]interface{}) + for _, paRaw := range 
publicAddresses { + pa := paRaw.(map[string]interface{}) + if pa["version"].(float64) == 6 { + hostv6 = fmt.Sprintf("[%s]", pa["addr"].(string)) + break + } + } + } + } + + // If no hostv6 found, just get the first IPv6 we find + if hostv6 == "" { + for _, networkAddresses := range server.Addresses { + for _, element := range networkAddresses.([]interface{}) { + address := element.(map[string]interface{}) + if address["version"].(float64) == 6 { + hostv6 = fmt.Sprintf("[%s]", address["addr"].(string)) + break + } + } + } + } + d.Set("access_ip_v6", hostv6) + log.Printf("hostv6: %s", hostv6) + + preferredv := "" + if hostv4 != "" { + preferredv = hostv4 + } else if hostv6 != "" { + preferredv = hostv6 + } + + if preferredv != "" { + // Initialize the connection info + d.SetConnInfo(map[string]string{ + "type": "ssh", + "host": preferredv, + }) + } + + d.Set("metadata", server.Metadata) + + secGrpNames := []string{} + for _, sg := range server.SecurityGroups { + secGrpNames = append(secGrpNames, sg["name"].(string)) + } + d.Set("security_groups", secGrpNames) + + flavorId, ok := server.Flavor["id"].(string) + if !ok { + return fmt.Errorf("Error setting OpenStack server's flavor: %v", server.Flavor) + } + d.Set("flavor_id", flavorId) + + flavor, err := flavors.Get(computeClient, flavorId).Extract() + if err != nil { + return err + } + d.Set("flavor_name", flavor.Name) + + imageId, ok := server.Image["id"].(string) + if !ok { + return fmt.Errorf("Error setting OpenStack server's image: %v", server.Image) + } + d.Set("image_id", imageId) + + image, err := images.Get(computeClient, imageId).Extract() + if err != nil { + return err + } + d.Set("image_name", image.Name) + + // volume attachments + vas, err := getVolumeAttachments(computeClient, d.Id()) + if err != nil { + return err + } + if len(vas) > 0 { + attachments := make([]map[string]interface{}, len(vas)) + for i, attachment := range vas { + attachments[i] = make(map[string]interface{}) + attachments[i]["id"] = 
attachment.ID + attachments[i]["volume_id"] = attachment.VolumeID + attachments[i]["device"] = attachment.Device + } + log.Printf("[INFO] Volume attachments: %v", attachments) + d.Set("volume", attachments) + } + + return nil +} + +func resourceComputeInstanceV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + var updateOpts servers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("access_ip_v4") { + updateOpts.AccessIPv4 = d.Get("access_ip_v4").(string) + } + if d.HasChange("access_ip_v6") { + updateOpts.AccessIPv4 = d.Get("access_ip_v6").(string) + } + + if updateOpts != (servers.UpdateOpts{}) { + _, err := servers.Update(computeClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server: %s", err) + } + } + + if d.HasChange("metadata") { + var metadataOpts servers.MetadataOpts + metadataOpts = make(servers.MetadataOpts) + newMetadata := d.Get("metadata").(map[string]interface{}) + for k, v := range newMetadata { + metadataOpts[k] = v.(string) + } + + _, err := servers.UpdateMetadata(computeClient, d.Id(), metadataOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack server (%s) metadata: %s", d.Id(), err) + } + } + + if d.HasChange("security_groups") { + oldSGRaw, newSGRaw := d.GetChange("security_groups") + oldSGSlice, newSGSlice := oldSGRaw.([]interface{}), newSGRaw.([]interface{}) + oldSGSet := schema.NewSet(func(v interface{}) int { return hashcode.String(v.(string)) }, oldSGSlice) + newSGSet := schema.NewSet(func(v interface{}) int { return hashcode.String(v.(string)) }, newSGSlice) + secgroupsToAdd := newSGSet.Difference(oldSGSet) + secgroupsToRemove := oldSGSet.Difference(newSGSet) + + log.Printf("[DEBUG] Security groups to add: 
%v", secgroupsToAdd) + + log.Printf("[DEBUG] Security groups to remove: %v", secgroupsToRemove) + + for _, g := range secgroupsToAdd.List() { + err := secgroups.AddServerToGroup(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil { + return fmt.Errorf("Error adding security group to OpenStack server (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Added security group (%s) to instance (%s)", g.(string), d.Id()) + } + + for _, g := range secgroupsToRemove.List() { + err := secgroups.RemoveServerFromGroup(computeClient, d.Id(), g.(string)).ExtractErr() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return fmt.Errorf("Error removing security group from OpenStack server (%s): %s", d.Id(), err) + } + if errCode.Actual == 404 { + continue + } else { + return fmt.Errorf("Error removing security group from OpenStack server (%s): %s", d.Id(), err) + } + } else { + log.Printf("[DEBUG] Removed security group (%s) from instance (%s)", g.(string), d.Id()) + } + } + } + + if d.HasChange("admin_pass") { + if newPwd, ok := d.Get("admin_pass").(string); ok { + err := servers.ChangeAdminPassword(computeClient, d.Id(), newPwd).ExtractErr() + if err != nil { + return fmt.Errorf("Error changing admin password of OpenStack server (%s): %s", d.Id(), err) + } + } + } + + if d.HasChange("floating_ip") { + floatingIP := d.Get("floating_ip").(string) + if floatingIP != "" { + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + allFloatingIPs, err := getFloatingIPs(networkingClient) + if err != nil { + return fmt.Errorf("Error listing OpenStack floating IPs: %s", err) + } + err = assignFloatingIP(networkingClient, extractFloatingIPFromIP(allFloatingIPs, floatingIP), d.Id()) + if err != nil { + fmt.Errorf("Error assigning floating IP to OpenStack compute instance: %s", err) + } + } + } + + if 
d.HasChange("volume") { + // old attachments and new attachments + oldAttachments, newAttachments := d.GetChange("volume") + + // for each old attachment, detach the volume + oldAttachmentSet := oldAttachments.(*schema.Set).List() + if len(oldAttachmentSet) > 0 { + if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { + return err + } else { + if err := detachVolumesFromInstance(computeClient, blockClient, d.Id(), oldAttachmentSet); err != nil { + return err + } + } + } + + // for each new attachment, attach the volume + newAttachmentSet := newAttachments.(*schema.Set).List() + if len(newAttachmentSet) > 0 { + if blockClient, err := config.blockStorageV1Client(d.Get("region").(string)); err != nil { + return err + } else { + if err := attachVolumesToInstance(computeClient, blockClient, d.Id(), newAttachmentSet); err != nil { + return err + } + } + } + + d.SetPartial("volume") + } + + if d.HasChange("flavor_id") || d.HasChange("flavor_name") { + flavorId, err := getFlavorID(computeClient, d) + if err != nil { + return err + } + resizeOpts := &servers.ResizeOpts{ + FlavorRef: flavorId, + } + log.Printf("[DEBUG] Resize configuration: %#v", resizeOpts) + err = servers.Resize(computeClient, d.Id(), resizeOpts).ExtractErr() + if err != nil { + return fmt.Errorf("Error resizing OpenStack server: %s", err) + } + + // Wait for the instance to finish resizing. + log.Printf("[DEBUG] Waiting for instance (%s) to finish resizing", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"RESIZE"}, + Target: "VERIFY_RESIZE", + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: 3 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to resize: %s", d.Id(), err) + } + + // Confirm resize. 
+ log.Printf("[DEBUG] Confirming resize") + err = servers.ConfirmResize(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error confirming resize of OpenStack server: %s", err) + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"VERIFY_RESIZE"}, + Target: "ACTIVE", + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: 3 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf("Error waiting for instance (%s) to confirm resize: %s", d.Id(), err) + } + } + + return resourceComputeInstanceV2Read(d, meta) +} + +func resourceComputeInstanceV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = servers.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack server: %s", err) + } + + // Wait for the instance to delete before moving on. + log.Printf("[DEBUG] Waiting for instance (%s) to delete", d.Id()) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"ACTIVE"}, + Target: "DELETED", + Refresh: ServerV2StateRefreshFunc(computeClient, d.Id()), + Timeout: 10 * time.Minute, + Delay: 10 * time.Second, + MinTimeout: 3 * time.Second, + } + + _, err = stateConf.WaitForState() + if err != nil { + return fmt.Errorf( + "Error waiting for instance (%s) to delete: %s", + d.Id(), err) + } + + d.SetId("") + return nil +} + +// ServerV2StateRefreshFunc returns a resource.StateRefreshFunc that is used to watch +// an OpenStack instance. 
+func ServerV2StateRefreshFunc(client *gophercloud.ServiceClient, instanceID string) resource.StateRefreshFunc { + return func() (interface{}, string, error) { + s, err := servers.Get(client, instanceID).Extract() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return nil, "", err + } + if errCode.Actual == 404 { + return s, "DELETED", nil + } + return nil, "", err + } + + return s, s.Status, nil + } +} + +func resourceInstanceSecGroupsV2(d *schema.ResourceData) []string { + rawSecGroups := d.Get("security_groups").([]interface{}) + secgroups := make([]string, len(rawSecGroups)) + for i, raw := range rawSecGroups { + secgroups[i] = raw.(string) + } + return secgroups +} + +func resourceInstanceNetworksV2(d *schema.ResourceData) []servers.Network { + rawNetworks := d.Get("network").([]interface{}) + networks := make([]servers.Network, len(rawNetworks)) + for i, raw := range rawNetworks { + rawMap := raw.(map[string]interface{}) + networks[i] = servers.Network{ + UUID: rawMap["uuid"].(string), + Port: rawMap["port"].(string), + FixedIP: rawMap["fixed_ip"].(string), + } + } + return networks +} + +func resourceInstanceMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} + +func resourceInstanceBlockDeviceV2(d *schema.ResourceData, bd map[string]interface{}) []bootfromvolume.BlockDevice { + sourceType := bootfromvolume.SourceType(bd["source_type"].(string)) + bfvOpts := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + UUID: bd["uuid"].(string), + SourceType: sourceType, + VolumeSize: bd["volume_size"].(int), + DestinationType: bd["destination_type"].(string), + BootIndex: bd["boot_index"].(int), + }, + } + + return bfvOpts +} + +func extractFloatingIPFromIP(ips []floatingips.FloatingIP, IP string) *floatingips.FloatingIP { + for _, floatingIP := range ips { + if 
floatingIP.FloatingIP == IP { + return &floatingIP + } + } + return nil +} + +func assignFloatingIP(networkingClient *gophercloud.ServiceClient, floatingIP *floatingips.FloatingIP, instanceID string) error { + portID, err := getInstancePortID(networkingClient, instanceID) + if err != nil { + return err + } + return floatingips.Update(networkingClient, floatingIP.ID, floatingips.UpdateOpts{ + PortID: portID, + }).Err +} + +func getInstancePortID(networkingClient *gophercloud.ServiceClient, instanceID string) (string, error) { + pager := ports.List(networkingClient, ports.ListOpts{ + DeviceID: instanceID, + }) + + var portID string + err := pager.EachPage(func(page pagination.Page) (bool, error) { + portList, err := ports.ExtractPorts(page) + if err != nil { + return false, err + } + for _, port := range portList { + portID = port.ID + return false, nil + } + return true, nil + }) + + if err != nil { + return "", err + } + + if portID == "" { + return "", fmt.Errorf("Cannot find port for instance %s", instanceID) + } + + return portID, nil +} + +func getFloatingIPs(networkingClient *gophercloud.ServiceClient) ([]floatingips.FloatingIP, error) { + pager := floatingips.List(networkingClient, floatingips.ListOpts{}) + + ips := []floatingips.FloatingIP{} + err := pager.EachPage(func(page pagination.Page) (bool, error) { + floatingipList, err := floatingips.ExtractFloatingIPs(page) + if err != nil { + return false, err + } + for _, f := range floatingipList { + ips = append(ips, f) + } + return true, nil + }) + + if err != nil { + return nil, err + } + return ips, nil +} + +func getImageID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { + imageId := d.Get("image_id").(string) + + if imageId != "" { + return imageId, nil + } + + imageCount := 0 + imageName := d.Get("image_name").(string) + if imageName != "" { + pager := images.ListDetail(client, &images.ListOpts{ + Name: imageName, + }) + pager.EachPage(func(page pagination.Page) (bool, error) 
{ + imageList, err := images.ExtractImages(page) + if err != nil { + return false, err + } + + for _, i := range imageList { + if i.Name == imageName { + imageCount++ + imageId = i.ID + } + } + return true, nil + }) + + switch imageCount { + case 0: + return "", fmt.Errorf("Unable to find image: %s", imageName) + case 1: + return imageId, nil + default: + return "", fmt.Errorf("Found %d images matching %s", imageCount, imageName) + } + } + return "", fmt.Errorf("Neither an image ID nor an image name were able to be determined.") +} + +func getFlavorID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) { + flavorId := d.Get("flavor_id").(string) + + if flavorId != "" { + return flavorId, nil + } + + flavorCount := 0 + flavorName := d.Get("flavor_name").(string) + if flavorName != "" { + pager := flavors.ListDetail(client, nil) + pager.EachPage(func(page pagination.Page) (bool, error) { + flavorList, err := flavors.ExtractFlavors(page) + if err != nil { + return false, err + } + + for _, f := range flavorList { + if f.Name == flavorName { + flavorCount++ + flavorId = f.ID + } + } + return true, nil + }) + + switch flavorCount { + case 0: + return "", fmt.Errorf("Unable to find flavor: %s", flavorName) + case 1: + return flavorId, nil + default: + return "", fmt.Errorf("Found %d flavors matching %s", flavorCount, flavorName) + } + } + return "", fmt.Errorf("Neither a flavor ID nor a flavor name were able to be determined.") +} + +func resourceComputeVolumeAttachmentHash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["volume_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["device"].(string))) + return hashcode.String(buf.String()) +} + +func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { + if len(vols) > 0 { + for _, v := range vols { + va := 
v.(map[string]interface{}) + volumeId := va["volume_id"].(string) + device := va["device"].(string) + + s := "" + if serverId != "" { + s = serverId + } else if va["server_id"] != "" { + s = va["server_id"].(string) + } else { + return fmt.Errorf("Unable to determine server ID to attach volume.") + } + + vaOpts := &volumeattach.CreateOpts{ + Device: device, + VolumeID: volumeId, + } + + if _, err := volumeattach.Create(computeClient, s, vaOpts).Extract(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"attaching", "available"}, + Target: "in-use", + Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), + Timeout: 30 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 2 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + + log.Printf("[INFO] Attached volume %s to instance %s", volumeId, serverId) + } + } + return nil +} + +func detachVolumesFromInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error { + if len(vols) > 0 { + for _, v := range vols { + va := v.(map[string]interface{}) + aId := va["id"].(string) + + if err := volumeattach.Delete(computeClient, serverId, aId).ExtractErr(); err != nil { + return err + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"detaching", "in-use"}, + Target: "available", + Refresh: VolumeV1StateRefreshFunc(blockClient, va["volume_id"].(string)), + Timeout: 30 * time.Minute, + Delay: 5 * time.Second, + MinTimeout: 2 * time.Second, + } + + if _, err := stateConf.WaitForState(); err != nil { + return err + } + log.Printf("[INFO] Detached volume %s from instance %s", va["volume_id"], serverId) + } + } + + return nil +} + +func getVolumeAttachments(computeClient *gophercloud.ServiceClient, serverId string) ([]volumeattach.VolumeAttachment, error) { + var attachments []volumeattach.VolumeAttachment + err := 
volumeattach.List(computeClient, serverId).EachPage(func(page pagination.Page) (bool, error) { + actual, err := volumeattach.ExtractVolumeAttachments(page) + if err != nil { + return false, err + } + + attachments = actual + return true, nil + }) + + if err != nil { + return nil, err + } + + return attachments, nil +} diff --git a/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go new file mode 100644 index 000000000..f4c6c8557 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_instance_v2_test.go @@ -0,0 +1,185 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/rackspace/gophercloud/openstack/compute/v2/servers" + "github.com/rackspace/gophercloud/pagination" +) + +func TestAccComputeV2Instance_basic(t *testing.T) { + var instance servers.Server + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Instance_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceMetadata(&instance, "foo", "bar"), + ), + }, + }, + }) +} + +func TestAccComputeV2Instance_volumeAttach(t *testing.T) { + var instance servers.Server + var volume volumes.Volume + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2InstanceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: 
testAccComputeV2Instance_volumeAttach, + Check: resource.ComposeTestCheckFunc( + testAccCheckBlockStorageV1VolumeExists(t, "openstack_blockstorage_volume_v1.myvol", &volume), + testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.foo", &instance), + testAccCheckComputeV2InstanceVolumeAttachment(&instance, &volume), + ), + }, + }, + }) +} + +func testAccCheckComputeV2InstanceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2InstanceDestroy) Error creating OpenStack compute client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_compute_instance_v2" { + continue + } + + _, err := servers.Get(computeClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Instance still exists") + } + } + + return nil +} + +func testAccCheckComputeV2InstanceExists(t *testing.T, n string, instance *servers.Server) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2InstanceExists) Error creating OpenStack compute client: %s", err) + } + + found, err := servers.Get(computeClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Instance not found") + } + + *instance = *found + + return nil + } +} + +func testAccCheckComputeV2InstanceMetadata( + instance *servers.Server, k string, v string) resource.TestCheckFunc { + return func(s *terraform.State) error { + if instance.Metadata == nil { + return fmt.Errorf("No metadata") + } + + for key, value := range 
instance.Metadata { + if k != key { + continue + } + + if v == value.(string) { + return nil + } + + return fmt.Errorf("Bad value for %s: %s", k, value) + } + + return fmt.Errorf("Metadata not found: %s", k) + } +} + +func testAccCheckComputeV2InstanceVolumeAttachment( + instance *servers.Server, volume *volumes.Volume) resource.TestCheckFunc { + return func(s *terraform.State) error { + var attachments []volumeattach.VolumeAttachment + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return err + } + err = volumeattach.List(computeClient, instance.ID).EachPage(func(page pagination.Page) (bool, error) { + actual, err := volumeattach.ExtractVolumeAttachments(page) + if err != nil { + return false, fmt.Errorf("Unable to lookup attachment: %s", err) + } + + attachments = actual + return true, nil + }) + + for _, attachment := range attachments { + if attachment.VolumeID == volume.ID { + return nil + } + } + + return fmt.Errorf("Volume not found: %s", volume.ID) + } +} + +var testAccComputeV2Instance_basic = fmt.Sprintf(` + resource "openstack_compute_instance_v2" "foo" { + region = "%s" + name = "terraform-test" + metadata { + foo = "bar" + } + }`, + OS_REGION_NAME) + +var testAccComputeV2Instance_volumeAttach = fmt.Sprintf(` + resource "openstack_blockstorage_volume_v1" "myvol" { + name = "myvol" + size = 1 + } + + resource "openstack_compute_instance_v2" "foo" { + region = "%s" + name = "terraform-test" + volume { + volume_id = "${openstack_blockstorage_volume_v1.myvol.id}" + } + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go new file mode 100644 index 000000000..db6bed5b2 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_keypair_v2.go @@ -0,0 +1,92 @@ +package openstack + +import ( + "fmt" + "log" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" +) + +func resourceComputeKeypairV2() *schema.Resource { + return &schema.Resource{ + Create: resourceComputeKeypairV2Create, + Read: resourceComputeKeypairV2Read, + Delete: resourceComputeKeypairV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "public_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceComputeKeypairV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := keypairs.CreateOpts{ + Name: d.Get("name").(string), + PublicKey: d.Get("public_key").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + kp, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack keypair: %s", err) + } + + d.SetId(kp.Name) + + return resourceComputeKeypairV2Read(d, meta) +} + +func resourceComputeKeypairV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + kp, err := keypairs.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "keypair") + } + + d.Set("name", kp.Name) + d.Set("public_key", kp.PublicKey) + + return nil +} + +func resourceComputeKeypairV2Delete(d *schema.ResourceData, meta interface{}) error { + config := 
meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = keypairs.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack keypair: %s", err) + } + d.SetId("") + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go new file mode 100644 index 000000000..da090bcd8 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_keypair_v2_test.go @@ -0,0 +1,90 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs" +) + +func TestAccComputeV2Keypair_basic(t *testing.T) { + var keypair keypairs.KeyPair + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2KeypairDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2Keypair_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeV2KeypairExists(t, "openstack_compute_keypair_v2.foo", &keypair), + ), + }, + }, + }) +} + +func testAccCheckComputeV2KeypairDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2KeypairDestroy) Error creating OpenStack compute client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_compute_keypair_v2" { + continue + } + + _, err := keypairs.Get(computeClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Keypair still exists") + } + } + + return nil +} + +func 
testAccCheckComputeV2KeypairExists(t *testing.T, n string, kp *keypairs.KeyPair) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2KeypairExists) Error creating OpenStack compute client: %s", err) + } + + found, err := keypairs.Get(computeClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.Name != rs.Primary.ID { + return fmt.Errorf("Keypair not found") + } + + *kp = *found + + return nil + } +} + +var testAccComputeV2Keypair_basic = fmt.Sprintf(` + resource "openstack_compute_keypair_v2" "foo" { + region = "%s" + name = "test-keypair-tf" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLo1BCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAT9+OfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZquwhvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TAIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIFuu1p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB jrp-hp-pc" + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go new file mode 100644 index 000000000..ca646d77d --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2.go @@ -0,0 +1,294 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" +) + +func resourceComputeSecGroupV2() *schema.Resource 
{ + return &schema.Resource{ + Create: resourceComputeSecGroupV2Create, + Read: resourceComputeSecGroupV2Read, + Update: resourceComputeSecGroupV2Update, + Delete: resourceComputeSecGroupV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "rule": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "id": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "from_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "to_port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "ip_protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "cidr": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "from_group_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "self": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + ForceNew: false, + }, + }, + }, + }, + }, + } +} + +func resourceComputeSecGroupV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + createOpts := secgroups.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + sg, err := secgroups.Create(computeClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating 
OpenStack security group: %s", err) + } + + d.SetId(sg.ID) + + createRuleOptsList := resourceSecGroupRulesV2(d) + for _, createRuleOpts := range createRuleOptsList { + _, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack security group rule: %s", err) + } + } + + return resourceComputeSecGroupV2Read(d, meta) +} + +func resourceComputeSecGroupV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + sg, err := secgroups.Get(computeClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "security group") + } + + d.Set("name", sg.Name) + d.Set("description", sg.Description) + rtm := rulesToMap(sg.Rules) + for _, v := range rtm { + if v["group"] == d.Get("name") { + v["self"] = "1" + } else { + v["self"] = "0" + } + } + log.Printf("[DEBUG] rulesToMap(sg.Rules): %+v", rtm) + d.Set("rule", rtm) + + return nil +} + +func resourceComputeSecGroupV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + updateOpts := secgroups.UpdateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + } + + log.Printf("[DEBUG] Updating Security Group (%s) with options: %+v", d.Id(), updateOpts) + + _, err = secgroups.Update(computeClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack security group (%s): %s", d.Id(), err) + } + + if d.HasChange("rule") { + oldSGRaw, newSGRaw := d.GetChange("rule") + oldSGRSlice, newSGRSlice := oldSGRaw.([]interface{}), newSGRaw.([]interface{}) + oldSGRSet := schema.NewSet(secgroupRuleV2Hash, 
oldSGRSlice) + newSGRSet := schema.NewSet(secgroupRuleV2Hash, newSGRSlice) + secgrouprulesToAdd := newSGRSet.Difference(oldSGRSet) + secgrouprulesToRemove := oldSGRSet.Difference(newSGRSet) + + log.Printf("[DEBUG] Security group rules to add: %v", secgrouprulesToAdd) + + log.Printf("[DEBUG] Security groups rules to remove: %v", secgrouprulesToRemove) + + for _, rawRule := range secgrouprulesToAdd.List() { + createRuleOpts := resourceSecGroupRuleCreateOptsV2(d, rawRule) + rule, err := secgroups.CreateRule(computeClient, createRuleOpts).Extract() + if err != nil { + return fmt.Errorf("Error adding rule to OpenStack security group (%s): %s", d.Id(), err) + } + log.Printf("[DEBUG] Added rule (%s) to OpenStack security group (%s) ", rule.ID, d.Id()) + } + + for _, r := range secgrouprulesToRemove.List() { + rule := resourceSecGroupRuleV2(d, r) + err := secgroups.DeleteRule(computeClient, rule.ID).ExtractErr() + if err != nil { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err) + } + if errCode.Actual == 404 { + continue + } else { + return fmt.Errorf("Error removing rule (%s) from OpenStack security group (%s)", rule.ID, d.Id()) + } + } else { + log.Printf("[DEBUG] Removed rule (%s) from OpenStack security group (%s): %s", rule.ID, d.Id(), err) + } + } + } + + return resourceComputeSecGroupV2Read(d, meta) +} + +func resourceComputeSecGroupV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + computeClient, err := config.computeV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack compute client: %s", err) + } + + err = secgroups.Delete(computeClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack security group: %s", err) + } + d.SetId("") + return nil +} + +func resourceSecGroupRulesV2(d *schema.ResourceData) 
[]secgroups.CreateRuleOpts { + rawRules := (d.Get("rule")).([]interface{}) + createRuleOptsList := make([]secgroups.CreateRuleOpts, len(rawRules)) + for i, raw := range rawRules { + rawMap := raw.(map[string]interface{}) + groupId := rawMap["from_group_id"].(string) + if rawMap["self"].(bool) { + groupId = d.Id() + } + createRuleOptsList[i] = secgroups.CreateRuleOpts{ + ParentGroupID: d.Id(), + FromPort: rawMap["from_port"].(int), + ToPort: rawMap["to_port"].(int), + IPProtocol: rawMap["ip_protocol"].(string), + CIDR: rawMap["cidr"].(string), + FromGroupID: groupId, + } + } + return createRuleOptsList +} + +func resourceSecGroupRuleCreateOptsV2(d *schema.ResourceData, raw interface{}) secgroups.CreateRuleOpts { + rawMap := raw.(map[string]interface{}) + groupId := rawMap["from_group_id"].(string) + if rawMap["self"].(bool) { + groupId = d.Id() + } + return secgroups.CreateRuleOpts{ + ParentGroupID: d.Id(), + FromPort: rawMap["from_port"].(int), + ToPort: rawMap["to_port"].(int), + IPProtocol: rawMap["ip_protocol"].(string), + CIDR: rawMap["cidr"].(string), + FromGroupID: groupId, + } +} + +func resourceSecGroupRuleV2(d *schema.ResourceData, raw interface{}) secgroups.Rule { + rawMap := raw.(map[string]interface{}) + return secgroups.Rule{ + ID: rawMap["id"].(string), + ParentGroupID: d.Id(), + FromPort: rawMap["from_port"].(int), + ToPort: rawMap["to_port"].(int), + IPProtocol: rawMap["ip_protocol"].(string), + IPRange: secgroups.IPRange{CIDR: rawMap["cidr"].(string)}, + } +} + +func rulesToMap(sgrs []secgroups.Rule) []map[string]interface{} { + sgrMap := make([]map[string]interface{}, len(sgrs)) + for i, sgr := range sgrs { + sgrMap[i] = map[string]interface{}{ + "id": sgr.ID, + "from_port": sgr.FromPort, + "to_port": sgr.ToPort, + "ip_protocol": sgr.IPProtocol, + "cidr": sgr.IPRange.CIDR, + "group": sgr.Group.Name, + } + } + return sgrMap +} + +func secgroupRuleV2Hash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + 
buf.WriteString(fmt.Sprintf("%d-", m["from_port"].(int))) + buf.WriteString(fmt.Sprintf("%d-", m["to_port"].(int))) + buf.WriteString(fmt.Sprintf("%s-", m["ip_protocol"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["cidr"].(string))) + + return hashcode.String(buf.String()) +} diff --git a/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go new file mode 100644 index 000000000..e78865b8a --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_compute_secgroup_v2_test.go @@ -0,0 +1,90 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups" +) + +func TestAccComputeV2SecGroup_basic(t *testing.T) { + var secgroup secgroups.SecurityGroup + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckComputeV2SecGroupDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccComputeV2SecGroup_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckComputeV2SecGroupExists(t, "openstack_compute_secgroup_v2.foo", &secgroup), + ), + }, + }, + }) +} + +func testAccCheckComputeV2SecGroupDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2SecGroupDestroy) Error creating OpenStack compute client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_compute_secgroup_v2" { + continue + } + + _, err := secgroups.Get(computeClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Security group still exists") + } + } + + return nil +} + +func testAccCheckComputeV2SecGroupExists(t *testing.T, 
n string, secgroup *secgroups.SecurityGroup) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + computeClient, err := config.computeV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckComputeV2SecGroupExists) Error creating OpenStack compute client: %s", err) + } + + found, err := secgroups.Get(computeClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Security group not found") + } + + *secgroup = *found + + return nil + } +} + +var testAccComputeV2SecGroup_basic = fmt.Sprintf(` + resource "openstack_compute_secgroup_v2" "foo" { + region = "%s" + name = "test_group_1" + description = "first test security group" + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go b/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go new file mode 100644 index 000000000..8505ac3b3 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_firewall_v1.go @@ -0,0 +1,242 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" +) + +func resourceFWFirewallV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWFirewallV1Create, + Read: resourceFWFirewallV1Read, + Update: resourceFWFirewallV1Update, + Delete: resourceFWFirewallV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "policy_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWFirewallV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + adminStateUp := d.Get("admin_state_up").(bool) + + firewallConfiguration := firewalls.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + PolicyID: d.Get("policy_id").(string), + AdminStateUp: &adminStateUp, + TenantID: d.Get("tenant_id").(string), + } + + log.Printf("[DEBUG] Create firewall: %#v", firewallConfiguration) + + firewall, err := firewalls.Create(networkingClient, firewallConfiguration).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall created: %#v", firewall) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE"}, + Target: "ACTIVE", + Refresh: waitForFirewallActive(networkingClient, firewall.ID), + Timeout: 30 * time.Second, + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + d.SetId(firewall.ID) + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + firewall, err := 
firewalls.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, "firewall") + } + + d.Set("name", firewall.Name) + d.Set("description", firewall.Description) + d.Set("policy_id", firewall.PolicyID) + d.Set("admin_state_up", firewall.AdminStateUp) + d.Set("tenant_id", firewall.TenantID) + + return nil +} + +func resourceFWFirewallV1Update(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := firewalls.UpdateOpts{} + + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("policy_id") { + opts.PolicyID = d.Get("policy_id").(string) + } + + if d.HasChange("admin_state_up") { + adminStateUp := d.Get("admin_state_up").(bool) + opts.AdminStateUp = &adminStateUp + } + + log.Printf("[DEBUG] Updating firewall with id %s: %#v", d.Id(), opts) + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + Target: "ACTIVE", + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: 30 * time.Second, + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + err = firewalls.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWFirewallV1Read(d, meta) +} + +func resourceFWFirewallV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + stateConf := &resource.StateChangeConf{ + Pending: []string{"PENDING_CREATE", "PENDING_UPDATE"}, + 
Target: "ACTIVE", + Refresh: waitForFirewallActive(networkingClient, d.Id()), + Timeout: 30 * time.Second, + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + err = firewalls.Delete(networkingClient, d.Id()).Err + + if err != nil { + return err + } + + stateConf = &resource.StateChangeConf{ + Pending: []string{"DELETING"}, + Target: "DELETED", + Refresh: waitForFirewallDeletion(networkingClient, d.Id()), + Timeout: 2 * time.Minute, + Delay: 0, + MinTimeout: 2 * time.Second, + } + + _, err = stateConf.WaitForState() + + return err +} + +func waitForFirewallActive(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + fw, err := firewalls.Get(networkingClient, id).Extract() + log.Printf("[DEBUG] Get firewall %s => %#v", id, fw) + + if err != nil { + return nil, "", err + } + return fw, fw.Status, nil + } +} + +func waitForFirewallDeletion(networkingClient *gophercloud.ServiceClient, id string) resource.StateRefreshFunc { + + return func() (interface{}, string, error) { + fw, err := firewalls.Get(networkingClient, id).Extract() + log.Printf("[DEBUG] Get firewall %s => %#v", id, fw) + + if err != nil { + httpStatus := err.(*gophercloud.UnexpectedResponseCodeError) + log.Printf("[DEBUG] Get firewall %s status is %d", id, httpStatus.Actual) + + if httpStatus.Actual == 404 { + log.Printf("[DEBUG] Firewall %s is actually deleted", id) + return "", "DELETED", nil + } + return nil, "", fmt.Errorf("Unexpected status code %d", httpStatus.Actual) + } + + log.Printf("[DEBUG] Firewall %s deletion is pending", id) + return fw, "DELETING", nil + } +} diff --git a/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go b/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go new file mode 100644 index 000000000..34112f778 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_firewall_v1_test.go @@ -0,0 +1,139 @@ +package 
openstack + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" +) + +func TestAccFWFirewallV1(t *testing.T) { + + var policyID *string + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWFirewallV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.accept_test", "", "", policyID), + ), + }, + resource.TestStep{ + Config: testFirewallConfigUpdated, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWFirewallV1Exists("openstack_fw_firewall_v1.accept_test", "accept_test", "terraform acceptance test", policyID), + ), + }, + }, + }) +} + +func testAccCheckFWFirewallV1Destroy(s *terraform.State) error { + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckFWFirewallV1Destroy) Error creating OpenStack networking client: %s", err) + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_fw_firewall_v1" { + continue + } + _, err = firewalls.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Firewall (%s) still exists.", rs.Primary.ID) + } + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + return httpError + } + } + return nil +} + +func testAccCheckFWFirewallV1Exists(n, expectedName, expectedDescription string, policyID *string) resource.TestCheckFunc { + + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == 
"" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckFirewallExists) Error creating OpenStack networking client: %s", err) + } + + var found *firewalls.Firewall + for i := 0; i < 5; i++ { + // Firewall creation is asynchronous. Retry some times + // if we get a 404 error. Fail on any other error. + found, err = firewalls.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + time.Sleep(time.Second) + continue + } + } + break + } + + if err != nil { + return err + } + + if found.Name != expectedName { + return fmt.Errorf("Expected Name to be <%s> but found <%s>", expectedName, found.Name) + } + if found.Description != expectedDescription { + return fmt.Errorf("Expected Description to be <%s> but found <%s>", expectedDescription, found.Description) + } + if found.PolicyID == "" { + return fmt.Errorf("Policy should not be empty") + } + if policyID != nil && found.PolicyID == *policyID { + return fmt.Errorf("Policy had not been correctly updated. 
Went from <%s> to <%s>", expectedName, found.Name) + } + + policyID = &found.PolicyID + + return nil + } +} + +const testFirewallConfig = ` +resource "openstack_fw_firewall_v1" "accept_test" { + policy_id = "${openstack_fw_policy_v1.accept_test_policy_1.id}" +} + +resource "openstack_fw_policy_v1" "accept_test_policy_1" { + name = "policy-1" +} +` + +const testFirewallConfigUpdated = ` +resource "openstack_fw_firewall_v1" "accept_test" { + name = "accept_test" + description = "terraform acceptance test" + policy_id = "${openstack_fw_policy_v1.accept_test_policy_2.id}" +} + +resource "openstack_fw_policy_v1" "accept_test_policy_2" { + name = "policy-2" +} +` diff --git a/builtin/providers/openstack/resource_openstack_fw_policy_v1.go b/builtin/providers/openstack/resource_openstack_fw_policy_v1.go new file mode 100644 index 000000000..0560bfcef --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_policy_v1.go @@ -0,0 +1,200 @@ +package openstack + +import ( + "fmt" + "log" + "time" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies" +) + +func resourceFWPolicyV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWPolicyV1Create, + Read: resourceFWPolicyV1Read, + Update: resourceFWPolicyV1Update, + Delete: resourceFWPolicyV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "audited": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + "shared": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: false, + }, + 
"tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "rules": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + }, + } +} + +func resourceFWPolicyV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + v := d.Get("rules").(*schema.Set) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", v.Len()) + + rules := make([]string, v.Len()) + for i, v := range v.List() { + rules[i] = v.(string) + } + + audited := d.Get("audited").(bool) + shared := d.Get("shared").(bool) + + opts := policies.CreateOpts{ + Name: d.Get("name").(string), + Description: d.Get("description").(string), + Audited: &audited, + Shared: &shared, + TenantID: d.Get("tenant_id").(string), + Rules: rules, + } + + log.Printf("[DEBUG] Create firewall policy: %#v", opts) + + policy, err := policies.Create(networkingClient, opts).Extract() + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall policy created: %#v", policy) + + d.SetId(policy.ID) + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + policy, err := policies.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, "LB pool") + } + + d.Set("name", policy.Name) + d.Set("description", policy.Description) + d.Set("shared", 
policy.Shared) + d.Set("audited", policy.Audited) + d.Set("tenant_id", policy.TenantID) + return nil +} + +func resourceFWPolicyV1Update(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := policies.UpdateOpts{} + + if d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + + if d.HasChange("rules") { + v := d.Get("rules").(*schema.Set) + + log.Printf("[DEBUG] Rules found : %#v", v) + log.Printf("[DEBUG] Rules count : %d", v.Len()) + + rules := make([]string, v.Len()) + for i, v := range v.List() { + rules[i] = v.(string) + } + opts.Rules = rules + } + + log.Printf("[DEBUG] Updating firewall policy with id %s: %#v", d.Id(), opts) + + err = policies.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWPolicyV1Read(d, meta) +} + +func resourceFWPolicyV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall policy: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + for i := 0; i < 15; i++ { + + err = policies.Delete(networkingClient, d.Id()).Err + if err == nil { + break + } + + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 409 { + return err + } + + // This error usualy means that the policy is attached + // to a firewall. At this point, the firewall is probably + // being delete. So, we retry a few times. 
+ + time.Sleep(time.Second * 2) + } + + return err +} diff --git a/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go b/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go new file mode 100644 index 000000000..1a37a383f --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_policy_v1_test.go @@ -0,0 +1,165 @@ +package openstack + +import ( + "fmt" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies" +) + +func TestAccFWPolicyV1(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWPolicyV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallPolicyConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWPolicyV1Exists( + "openstack_fw_policy_v1.accept_test", + "", "", 0), + ), + }, + resource.TestStep{ + Config: testFirewallPolicyConfigAddRules, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWPolicyV1Exists( + "openstack_fw_policy_v1.accept_test", + "accept_test", "terraform acceptance test", 2), + ), + }, + resource.TestStep{ + Config: testFirewallPolicyUpdateDeleteRule, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWPolicyV1Exists( + "openstack_fw_policy_v1.accept_test", + "accept_test", "terraform acceptance test", 1), + ), + }, + }, + }) +} + +func testAccCheckFWPolicyV1Destroy(s *terraform.State) error { + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckOpenstackFirewallPolicyDestroy) Error creating OpenStack networking client: %s", err) + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_fw_policy_v1" { + continue + } + _, err = 
policies.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Firewall policy (%s) still exists.", rs.Primary.ID) + } + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + return httpError + } + } + return nil +} + +func testAccCheckFWPolicyV1Exists(n, name, description string, ruleCount int) resource.TestCheckFunc { + + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckFirewallPolicyExists) Error creating OpenStack networking client: %s", err) + } + + var found *policies.Policy + for i := 0; i < 5; i++ { + // Firewall policy creation is asynchronous. Retry some times + // if we get a 404 error. Fail on any other error. 
+ found, err = policies.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + time.Sleep(time.Second) + continue + } + } + break + } + + if err != nil { + return err + } + + if name != found.Name { + return fmt.Errorf("Expected name <%s>, but found <%s>", name, found.Name) + } + + if description != found.Description { + return fmt.Errorf("Expected description <%s>, but found <%s>", description, found.Description) + } + + if ruleCount != len(found.Rules) { + return fmt.Errorf("Expected rule count <%d>, but found <%d>", ruleCount, len(found.Rules)) + } + + return nil + } +} + +const testFirewallPolicyConfig = ` +resource "openstack_fw_policy_v1" "accept_test" { + +} +` + +const testFirewallPolicyConfigAddRules = ` +resource "openstack_fw_policy_v1" "accept_test" { + name = "accept_test" + description = "terraform acceptance test" + rules = [ + "${openstack_fw_rule_v1.accept_test_udp_deny.id}", + "${openstack_fw_rule_v1.accept_test_tcp_allow.id}" + ] +} + +resource "openstack_fw_rule_v1" "accept_test_tcp_allow" { + protocol = "tcp" + action = "allow" +} + +resource "openstack_fw_rule_v1" "accept_test_udp_deny" { + protocol = "udp" + action = "deny" +} +` + +const testFirewallPolicyUpdateDeleteRule = ` +resource "openstack_fw_policy_v1" "accept_test" { + name = "accept_test" + description = "terraform acceptance test" + rules = [ + "${openstack_fw_rule_v1.accept_test_udp_deny.id}" + ] +} + +resource "openstack_fw_rule_v1" "accept_test_udp_deny" { + protocol = "udp" + action = "deny" +} +` diff --git a/builtin/providers/openstack/resource_openstack_fw_rule_v1.go b/builtin/providers/openstack/resource_openstack_fw_rule_v1.go new file mode 100644 index 000000000..f0f5affcc --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_rule_v1.go @@ -0,0 +1,223 @@ +package openstack + +import ( + "fmt" + "log" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules" +) + +func resourceFWRuleV1() *schema.Resource { + return &schema.Resource{ + Create: resourceFWRuleV1Create, + Read: resourceFWRuleV1Read, + Update: resourceFWRuleV1Update, + Delete: resourceFWRuleV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "action": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "ip_version": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + Default: 4, + }, + "source_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "destination_ip_address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "source_port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "destination_port": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + }, + "enabled": &schema.Schema{ + Type: schema.TypeBool, + Optional: true, + Default: true, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceFWRuleV1Create(d *schema.ResourceData, meta interface{}) error { + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + enabled := d.Get("enabled").(bool) + + ruleConfiguration := rules.CreateOpts{ + Name: d.Get("name").(string), + Description: 
d.Get("description").(string), + Protocol: d.Get("protocol").(string), + Action: d.Get("action").(string), + IPVersion: d.Get("ip_version").(int), + SourceIPAddress: d.Get("source_ip_address").(string), + DestinationIPAddress: d.Get("destination_ip_address").(string), + SourcePort: d.Get("source_port").(string), + DestinationPort: d.Get("destination_port").(string), + Enabled: &enabled, + TenantID: d.Get("tenant_id").(string), + } + + log.Printf("[DEBUG] Create firewall rule: %#v", ruleConfiguration) + + rule, err := rules.Create(networkingClient, ruleConfiguration).Extract() + + if err != nil { + return err + } + + log.Printf("[DEBUG] Firewall rule with id %s : %#v", rule.ID, rule) + + d.SetId(rule.ID) + + return resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Read(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Retrieve information about firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + + if err != nil { + return CheckDeleted(d, err, "LB pool") + } + + d.Set("protocol", rule.Protocol) + d.Set("action", rule.Action) + + d.Set("name", rule.Name) + d.Set("description", rule.Description) + d.Set("ip_version", rule.IPVersion) + d.Set("source_ip_address", rule.SourceIPAddress) + d.Set("destination_ip_address", rule.DestinationIPAddress) + d.Set("source_port", rule.SourcePort) + d.Set("destination_port", rule.DestinationPort) + d.Set("enabled", rule.Enabled) + + return nil +} + +func resourceFWRuleV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + opts := rules.UpdateOpts{} + + if 
d.HasChange("name") { + opts.Name = d.Get("name").(string) + } + if d.HasChange("description") { + opts.Description = d.Get("description").(string) + } + if d.HasChange("protocol") { + opts.Protocol = d.Get("protocol").(string) + } + if d.HasChange("action") { + opts.Action = d.Get("action").(string) + } + if d.HasChange("ip_version") { + opts.IPVersion = d.Get("ip_version").(int) + } + if d.HasChange("source_ip_address") { + sourceIPAddress := d.Get("source_ip_address").(string) + opts.SourceIPAddress = &sourceIPAddress + } + if d.HasChange("destination_ip_address") { + destinationIPAddress := d.Get("destination_ip_address").(string) + opts.DestinationIPAddress = &destinationIPAddress + } + if d.HasChange("source_port") { + sourcePort := d.Get("source_port").(string) + opts.SourcePort = &sourcePort + } + if d.HasChange("destination_port") { + destinationPort := d.Get("destination_port").(string) + opts.DestinationPort = &destinationPort + } + if d.HasChange("enabled") { + enabled := d.Get("enabled").(bool) + opts.Enabled = &enabled + } + + log.Printf("[DEBUG] Updating firewall rules: %#v", opts) + + err = rules.Update(networkingClient, d.Id(), opts).Err + if err != nil { + return err + } + + return resourceFWRuleV1Read(d, meta) +} + +func resourceFWRuleV1Delete(d *schema.ResourceData, meta interface{}) error { + log.Printf("[DEBUG] Destroy firewall rule: %s", d.Id()) + + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + rule, err := rules.Get(networkingClient, d.Id()).Extract() + if err != nil { + return err + } + + if rule.PolicyID != "" { + err := policies.RemoveRule(networkingClient, rule.PolicyID, rule.ID) + if err != nil { + return err + } + } + + return rules.Delete(networkingClient, d.Id()).Err +} diff --git a/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go 
b/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go new file mode 100644 index 000000000..ba96bb8b1 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_fw_rule_v1_test.go @@ -0,0 +1,185 @@ +package openstack + +import ( + "fmt" + "reflect" + "testing" + "time" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/fwaas/rules" +) + +func TestAccFWRuleV1(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckFWRuleV1Destroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testFirewallRuleMinimalConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWRuleV1Exists( + "openstack_fw_rule_v1.accept_test_minimal", + &rules.Rule{ + Protocol: "udp", + Action: "deny", + IPVersion: 4, + Enabled: true, + }), + ), + }, + resource.TestStep{ + Config: testFirewallRuleConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWRuleV1Exists( + "openstack_fw_rule_v1.accept_test", + &rules.Rule{ + Name: "accept_test", + Protocol: "udp", + Action: "deny", + Description: "Terraform accept test", + IPVersion: 4, + SourceIPAddress: "1.2.3.4", + DestinationIPAddress: "4.3.2.0/24", + SourcePort: "444", + DestinationPort: "555", + Enabled: true, + }), + ), + }, + resource.TestStep{ + Config: testFirewallRuleUpdateAllFieldsConfig, + Check: resource.ComposeTestCheckFunc( + testAccCheckFWRuleV1Exists( + "openstack_fw_rule_v1.accept_test", + &rules.Rule{ + Name: "accept_test_updated_2", + Protocol: "tcp", + Action: "allow", + Description: "Terraform accept test updated", + IPVersion: 4, + SourceIPAddress: "1.2.3.0/24", + DestinationIPAddress: "4.3.2.8", + SourcePort: "666", + DestinationPort: "777", + Enabled: false, + }), + ), + }, + }, + }) +} + +func testAccCheckFWRuleV1Destroy(s 
*terraform.State) error { + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckOpenstackFirewallRuleDestroy) Error creating OpenStack networking client: %s", err) + } + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_firewall_rule" { + continue + } + _, err = rules.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Firewall rule (%s) still exists.", rs.Primary.ID) + } + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + return httpError + } + } + return nil +} + +func testAccCheckFWRuleV1Exists(n string, expected *rules.Rule) resource.TestCheckFunc { + + return func(s *terraform.State) error { + + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckFirewallRuleExists) Error creating OpenStack networking client: %s", err) + } + + var found *rules.Rule + for i := 0; i < 5; i++ { + // Firewall rule creation is asynchronous. Retry some times + // if we get a 404 error. Fail on any other error. 
+ found, err = rules.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok || httpError.Actual != 404 { + time.Sleep(time.Second) + continue + } + } + break + } + + if err != nil { + return err + } + + expected.ID = found.ID + // Erase the tenant id because we don't want to compare + // it as long it is not present in the expected + found.TenantID = "" + + if !reflect.DeepEqual(expected, found) { + return fmt.Errorf("Expected:\n%#v\nFound:\n%#v", expected, found) + } + + return nil + } +} + +const testFirewallRuleMinimalConfig = ` +resource "openstack_fw_rule_v1" "accept_test_minimal" { + protocol = "udp" + action = "deny" +} +` + +const testFirewallRuleConfig = ` +resource "openstack_fw_rule_v1" "accept_test" { + name = "accept_test" + description = "Terraform accept test" + protocol = "udp" + action = "deny" + ip_version = 4 + source_ip_address = "1.2.3.4" + destination_ip_address = "4.3.2.0/24" + source_port = "444" + destination_port = "555" + enabled = true +} +` + +const testFirewallRuleUpdateAllFieldsConfig = ` +resource "openstack_fw_rule_v1" "accept_test" { + name = "accept_test_updated_2" + description = "Terraform accept test updated" + protocol = "tcp" + action = "allow" + ip_version = 4 + source_ip_address = "1.2.3.0/24" + destination_ip_address = "4.3.2.8" + source_port = "666" + destination_port = "777" + enabled = false +} +` diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go new file mode 100644 index 000000000..cdfd54ccc --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_lb_monitor_v1.go @@ -0,0 +1,192 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" +) + +func resourceLBMonitorV1() *schema.Resource { + 
return &schema.Resource{ + Create: resourceLBMonitorV1Create, + Read: resourceLBMonitorV1Read, + Update: resourceLBMonitorV1Update, + Delete: resourceLBMonitorV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "type": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "delay": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "timeout": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "max_retries": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: false, + }, + "url_path": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "http_method": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "expected_codes": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceLBMonitorV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := monitors.CreateOpts{ + TenantID: d.Get("tenant_id").(string), + Type: d.Get("type").(string), + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + ExpectedCodes: d.Get("expected_codes").(string), + HTTPMethod: d.Get("http_method").(string), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := 
strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + m, err := monitors.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB Monitor: %s", err) + } + log.Printf("[INFO] LB Monitor ID: %s", m.ID) + + d.SetId(m.ID) + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + m, err := monitors.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB monitor") + } + + log.Printf("[DEBUG] Retreived OpenStack LB Monitor %s: %+v", d.Id(), m) + + d.Set("type", m.Type) + d.Set("delay", m.Delay) + d.Set("timeout", m.Timeout) + d.Set("max_retries", m.MaxRetries) + d.Set("tenant_id", m.TenantID) + d.Set("url_path", m.URLPath) + d.Set("http_method", m.HTTPMethod) + d.Set("expected_codes", m.ExpectedCodes) + d.Set("admin_state_up", strconv.FormatBool(m.AdminStateUp)) + + return nil +} + +func resourceLBMonitorV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + updateOpts := monitors.UpdateOpts{ + Delay: d.Get("delay").(int), + Timeout: d.Get("timeout").(int), + MaxRetries: d.Get("max_retries").(int), + URLPath: d.Get("url_path").(string), + HTTPMethod: d.Get("http_method").(string), + ExpectedCodes: d.Get("expected_codes").(string), + } + + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw 
!= "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + + log.Printf("[DEBUG] Updating OpenStack LB Monitor %s with options: %+v", d.Id(), updateOpts) + + _, err = monitors.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB Monitor: %s", err) + } + + return resourceLBMonitorV1Read(d, meta) +} + +func resourceLBMonitorV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = monitors.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB Monitor: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go new file mode 100644 index 000000000..5aaf61d2c --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_lb_monitor_v1_test.go @@ -0,0 +1,110 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" +) + +func TestAccLBV1Monitor_basic(t *testing.T) { + var monitor monitors.Monitor + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1MonitorDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1Monitor_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckLBV1MonitorExists(t, "openstack_lb_monitor_v1.monitor_1", &monitor), + ), + }, + 
resource.TestStep{ + Config: testAccLBV1Monitor_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_lb_monitor_v1.monitor_1", "delay", "20"), + ), + }, + }, + }) +} + +func testAccCheckLBV1MonitorDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1MonitorDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_lb_monitor_v1" { + continue + } + + _, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("LB monitor still exists") + } + } + + return nil +} + +func testAccCheckLBV1MonitorExists(t *testing.T, n string, monitor *monitors.Monitor) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1MonitorExists) Error creating OpenStack networking client: %s", err) + } + + found, err := monitors.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Monitor not found") + } + + *monitor = *found + + return nil + } +} + +var testAccLBV1Monitor_basic = fmt.Sprintf(` + resource "openstack_lb_monitor_v1" "monitor_1" { + region = "%s" + type = "PING" + delay = 30 + timeout = 5 + max_retries = 3 + admin_state_up = "true" + }`, + OS_REGION_NAME) + +var testAccLBV1Monitor_update = fmt.Sprintf(` + resource "openstack_lb_monitor_v1" "monitor_1" { + region = "%s" + type = "PING" + delay = 20 + timeout = 5 + max_retries = 3 + 
admin_state_up = "true" + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_lb_pool_v1.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go new file mode 100644 index 000000000..6b69f2fac --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1.go @@ -0,0 +1,327 @@ +package openstack + +import ( + "bytes" + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools" + "github.com/rackspace/gophercloud/pagination" +) + +func resourceLBPoolV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBPoolV1Create, + Read: resourceLBPoolV1Read, + Update: resourceLBPoolV1Update, + Delete: resourceLBPoolV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + + "lb_method": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "member": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "address": &schema.Schema{ 
+ Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeBool, + Required: true, + ForceNew: false, + }, + }, + }, + Set: resourceLBMemberV1Hash, + }, + "monitor_ids": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + }, + } +} + +func resourceLBPoolV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := pools.CreateOpts{ + Name: d.Get("name").(string), + Protocol: d.Get("protocol").(string), + SubnetID: d.Get("subnet_id").(string), + LBMethod: d.Get("lb_method").(string), + TenantID: d.Get("tenant_id").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := pools.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB pool: %s", err) + } + log.Printf("[INFO] LB Pool ID: %s", p.ID) + + d.SetId(p.ID) + + if mIDs := resourcePoolMonitorIDsV1(d); mIDs != nil { + for _, mID := range mIDs { + _, err := pools.AssociateMonitor(networkingClient, p.ID, mID).Extract() + if err != nil { + return fmt.Errorf("Error associating monitor (%s) with OpenStack LB pool (%s): %s", mID, p.ID, err) + } + } + } + + if memberOpts := resourcePoolMembersV1(d); memberOpts != nil { + for _, memberOpt := range memberOpts { + _, err := members.Create(networkingClient, memberOpt).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB member: %s", err) + } + } + } + + return resourceLBPoolV1Read(d, meta) +} + +func resourceLBPoolV1Read(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + p, err := pools.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB pool") + } + + log.Printf("[DEBUG] Retrieved OpenStack LB Pool %s: %+v", d.Id(), p) + + d.Set("name", p.Name) + d.Set("protocol", p.Protocol) + d.Set("subnet_id", p.SubnetID) + d.Set("lb_method", p.LBMethod) + d.Set("tenant_id", p.TenantID) + d.Set("monitor_ids", p.MonitorIDs) + d.Set("member_ids", p.MemberIDs) + + return nil +} + +func resourceLBPoolV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts pools.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("lb_method") { + updateOpts.LBMethod = d.Get("lb_method").(string) + } + + log.Printf("[DEBUG] Updating OpenStack LB Pool %s with options: %+v", d.Id(), updateOpts) + + _, err = pools.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB Pool: %s", err) + } + + if d.HasChange("monitor_ids") { + oldMIDsRaw, newMIDsRaw := d.GetChange("monitor_ids") + oldMIDsSet, newMIDsSet := oldMIDsRaw.(*schema.Set), newMIDsRaw.(*schema.Set) + monitorsToAdd := newMIDsSet.Difference(oldMIDsSet) + monitorsToRemove := oldMIDsSet.Difference(newMIDsSet) + + log.Printf("[DEBUG] Monitors to add: %v", monitorsToAdd) + + log.Printf("[DEBUG] Monitors to remove: %v", monitorsToRemove) + + for _, m := range monitorsToAdd.List() { + _, err := pools.AssociateMonitor(networkingClient, d.Id(), m.(string)).Extract() + if err != nil { + return fmt.Errorf("Error associating 
monitor (%s) with OpenStack server (%s): %s", m.(string), d.Id(), err) + } + log.Printf("[DEBUG] Associated monitor (%s) with pool (%s)", m.(string), d.Id()) + } + + for _, m := range monitorsToRemove.List() { + _, err := pools.DisassociateMonitor(networkingClient, d.Id(), m.(string)).Extract() + if err != nil { + return fmt.Errorf("Error disassociating monitor (%s) from OpenStack server (%s): %s", m.(string), d.Id(), err) + } + log.Printf("[DEBUG] Disassociated monitor (%s) from pool (%s)", m.(string), d.Id()) + } + } + + if d.HasChange("member") { + oldMembersRaw, newMembersRaw := d.GetChange("member") + oldMembersSet, newMembersSet := oldMembersRaw.(*schema.Set), newMembersRaw.(*schema.Set) + membersToAdd := newMembersSet.Difference(oldMembersSet) + membersToRemove := oldMembersSet.Difference(newMembersSet) + + log.Printf("[DEBUG] Members to add: %v", membersToAdd) + + log.Printf("[DEBUG] Members to remove: %v", membersToRemove) + + for _, m := range membersToRemove.List() { + oldMember := resourcePoolMemberV1(d, m) + listOpts := members.ListOpts{ + PoolID: d.Id(), + Address: oldMember.Address, + ProtocolPort: oldMember.ProtocolPort, + } + err = members.List(networkingClient, listOpts).EachPage(func(page pagination.Page) (bool, error) { + extractedMembers, err := members.ExtractMembers(page) + if err != nil { + return false, err + } + for _, member := range extractedMembers { + err := members.Delete(networkingClient, member.ID).ExtractErr() + if err != nil { + return false, fmt.Errorf("Error deleting member (%s) from OpenStack LB pool (%s): %s", member.ID, d.Id(), err) + } + log.Printf("[DEBUG] Deleted member (%s) from pool (%s)", member.ID, d.Id()) + } + return true, nil + }) + } + + for _, m := range membersToAdd.List() { + createOpts := resourcePoolMemberV1(d, m) + newMember, err := members.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating LB member: %s", err) + } + log.Printf("[DEBUG] Created member (%s) in 
OpenStack LB pool (%s)", newMember.ID, d.Id()) + } + } + + return resourceLBPoolV1Read(d, meta) +} + +func resourceLBPoolV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = pools.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB Pool: %s", err) + } + + d.SetId("") + return nil +} + +func resourcePoolMonitorIDsV1(d *schema.ResourceData) []string { + mIDsRaw := d.Get("monitor_ids").(*schema.Set) + mIDs := make([]string, mIDsRaw.Len()) + for i, raw := range mIDsRaw.List() { + mIDs[i] = raw.(string) + } + return mIDs +} + +func resourcePoolMembersV1(d *schema.ResourceData) []members.CreateOpts { + memberOptsRaw := (d.Get("member")).(*schema.Set) + memberOpts := make([]members.CreateOpts, memberOptsRaw.Len()) + for i, raw := range memberOptsRaw.List() { + rawMap := raw.(map[string]interface{}) + memberOpts[i] = members.CreateOpts{ + TenantID: rawMap["tenant_id"].(string), + Address: rawMap["address"].(string), + ProtocolPort: rawMap["port"].(int), + PoolID: d.Id(), + } + } + return memberOpts +} + +func resourcePoolMemberV1(d *schema.ResourceData, raw interface{}) members.CreateOpts { + rawMap := raw.(map[string]interface{}) + return members.CreateOpts{ + TenantID: rawMap["tenant_id"].(string), + Address: rawMap["address"].(string), + ProtocolPort: rawMap["port"].(int), + PoolID: d.Id(), + } +} + +func resourceLBMemberV1Hash(v interface{}) int { + var buf bytes.Buffer + m := v.(map[string]interface{}) + buf.WriteString(fmt.Sprintf("%s-", m["region"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["tenant_id"].(string))) + buf.WriteString(fmt.Sprintf("%s-", m["address"].(string))) + buf.WriteString(fmt.Sprintf("%d-", m["port"].(int))) + + return hashcode.String(buf.String()) +} diff --git 
a/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go new file mode 100644 index 000000000..1889c2384 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_lb_pool_v1_test.go @@ -0,0 +1,134 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/pools" +) + +func TestAccLBV1Pool_basic(t *testing.T) { + var pool pools.Pool + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1PoolDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1Pool_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckLBV1PoolExists(t, "openstack_lb_pool_v1.pool_1", &pool), + ), + }, + resource.TestStep{ + Config: testAccLBV1Pool_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_lb_pool_v1.pool_1", "name", "tf_test_lb_pool_updated"), + ), + }, + }, + }) +} + +func testAccCheckLBV1PoolDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1PoolDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_lb_pool_v1" { + continue + } + + _, err := pools.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("LB Pool still exists") + } + } + + return nil +} + +func testAccCheckLBV1PoolExists(t *testing.T, n string, pool *pools.Pool) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if 
rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1PoolExists) Error creating OpenStack networking client: %s", err) + } + + found, err := pools.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Pool not found") + } + + *pool = *found + + return nil + } +} + +var testAccLBV1Pool_basic = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_lb_pool_v1" "pool_1" { + region = "%s" + name = "tf_test_lb_pool" + protocol = "HTTP" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + lb_method = "ROUND_ROBIN" + }`, + OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME) + +var testAccLBV1Pool_update = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_lb_pool_v1" "pool_1" { + region = "%s" + name = "tf_test_lb_pool_updated" + protocol = "HTTP" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + lb_method = "ROUND_ROBIN" + }`, + OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go new file mode 100644 index 000000000..bd2ae135e --- /dev/null +++ 
b/builtin/providers/openstack/resource_openstack_lb_vip_v1.go @@ -0,0 +1,258 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips" +) + +func resourceLBVipV1() *schema.Resource { + return &schema.Resource{ + Create: resourceLBVipV1Create, + Read: resourceLBVipV1Read, + Update: resourceLBVipV1Update, + Delete: resourceLBVipV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "protocol": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "port": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "pool_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "description": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "persistence": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + "conn_limit": &schema.Schema{ + Type: schema.TypeInt, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceLBVipV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + 
return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := vips.CreateOpts{ + Name: d.Get("name").(string), + SubnetID: d.Get("subnet_id").(string), + Protocol: d.Get("protocol").(string), + ProtocolPort: d.Get("port").(int), + PoolID: d.Get("pool_id").(string), + TenantID: d.Get("tenant_id").(string), + Address: d.Get("address").(string), + Description: d.Get("description").(string), + Persistence: resourceVipPersistenceV1(d), + ConnLimit: gophercloud.MaybeInt(d.Get("conn_limit").(int)), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + p, err := vips.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack LB VIP: %s", err) + } + log.Printf("[INFO] LB VIP ID: %s", p.ID) + + d.SetId(p.ID) + + return resourceLBVipV1Read(d, meta) +} + +func resourceLBVipV1Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + p, err := vips.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "LB VIP") + } + + log.Printf("[DEBUG] Retreived OpenStack LB VIP %s: %+v", d.Id(), p) + + d.Set("name", p.Name) + d.Set("subnet_id", p.SubnetID) + d.Set("protocol", p.Protocol) + d.Set("port", p.ProtocolPort) + d.Set("pool_id", p.PoolID) + + if t, exists := d.GetOk("tenant_id"); exists && t != "" { + d.Set("tenant_id", p.TenantID) + } else { + d.Set("tenant_id", "") + } + + if t, exists := d.GetOk("address"); exists && t != "" { + d.Set("address", p.Address) + } else { + d.Set("address", "") + } + + if t, 
exists := d.GetOk("description"); exists && t != "" { + d.Set("description", p.Description) + } else { + d.Set("description", "") + } + + if t, exists := d.GetOk("persistence"); exists && t != "" { + d.Set("persistence", p.Description) + } + + if t, exists := d.GetOk("conn_limit"); exists && t != "" { + d.Set("conn_limit", p.ConnLimit) + } else { + d.Set("conn_limit", "") + } + + if t, exists := d.GetOk("admin_state_up"); exists && t != "" { + d.Set("admin_state_up", strconv.FormatBool(p.AdminStateUp)) + } else { + d.Set("admin_state_up", "") + } + + return nil +} + +func resourceLBVipV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts vips.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("pool_id") { + updateOpts.PoolID = d.Get("pool_id").(string) + } + if d.HasChange("description") { + updateOpts.Description = d.Get("description").(string) + } + if d.HasChange("persistence") { + updateOpts.Persistence = resourceVipPersistenceV1(d) + } + if d.HasChange("conn_limit") { + updateOpts.ConnLimit = gophercloud.MaybeInt(d.Get("conn_limit").(int)) + } + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + + log.Printf("[DEBUG] Updating OpenStack LB VIP %s with options: %+v", d.Id(), updateOpts) + + _, err = vips.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack LB VIP: %s", err) + } + + return resourceLBVipV1Read(d, meta) +} + +func resourceLBVipV1Delete(d *schema.ResourceData, meta interface{}) 
error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = vips.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack LB VIP: %s", err) + } + + d.SetId("") + return nil +} + +func resourceVipPersistenceV1(d *schema.ResourceData) *vips.SessionPersistence { + rawP := d.Get("persistence").(interface{}) + rawMap := rawP.(map[string]interface{}) + if len(rawMap) != 0 { + p := vips.SessionPersistence{} + if t, ok := rawMap["type"]; ok { + p.Type = t.(string) + } + if c, ok := rawMap["cookie_name"]; ok { + p.CookieName = c.(string) + } + return &p + } + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go b/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go new file mode 100644 index 000000000..f30cd9d56 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_lb_vip_v1_test.go @@ -0,0 +1,152 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/vips" +) + +func TestAccLBV1VIP_basic(t *testing.T) { + var vip vips.VirtualIP + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckLBV1VIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccLBV1VIP_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckLBV1VIPExists(t, "openstack_lb_vip_v1.vip_1", &vip), + ), + }, + resource.TestStep{ + Config: testAccLBV1VIP_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_lb_vip_v1.vip_1", "name", "tf_test_lb_vip_updated"), + ), + }, + }, + }) +} + +func testAccCheckLBV1VIPDestroy(s 
*terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1VIPDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_lb_vip_v1" { + continue + } + + _, err := vips.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("LB VIP still exists") + } + } + + return nil +} + +func testAccCheckLBV1VIPExists(t *testing.T, n string, vip *vips.VirtualIP) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckLBV1VIPExists) Error creating OpenStack networking client: %s", err) + } + + found, err := vips.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("VIP not found") + } + + *vip = *found + + return nil + } +} + +var testAccLBV1VIP_basic = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_lb_pool_v1" "pool_1" { + region = "%s" + name = "tf_test_lb_pool" + protocol = "HTTP" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + lb_method = "ROUND_ROBIN" + } + + resource "openstack_lb_vip_v1" "vip_1" { + region = "RegionOne" + name = "tf_test_lb_vip" + subnet_id = 
"${openstack_networking_subnet_v2.subnet_1.id}" + protocol = "HTTP" + port = 80 + pool_id = "${openstack_lb_pool_v1.pool_1.id}" + }`, + OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME) + +var testAccLBV1VIP_update = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + } + + resource "openstack_lb_pool_v1" "pool_1" { + region = "%s" + name = "tf_test_lb_pool" + protocol = "HTTP" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + lb_method = "ROUND_ROBIN" + } + + resource "openstack_lb_vip_v1" "vip_1" { + region = "RegionOne" + name = "tf_test_lb_vip_updated" + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + protocol = "HTTP" + port = 80 + pool_id = "${openstack_lb_pool_v1.pool_1.id}" + }`, + OS_REGION_NAME, OS_REGION_NAME, OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go new file mode 100644 index 000000000..fd8b3fc65 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2.go @@ -0,0 +1,163 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" + "github.com/rackspace/gophercloud/pagination" +) + +func resourceNetworkingFloatingIPV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkFloatingIPV2Create, + Read: resourceNetworkFloatingIPV2Read, + Delete: resourceNetworkFloatingIPV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + 
Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "address": &schema.Schema{ + Type: schema.TypeString, + Computed: true, + }, + "pool": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_POOL_NAME"), + }, + }, + } +} + +func resourceNetworkFloatingIPV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + poolID, err := getNetworkID(d, meta, d.Get("pool").(string)) + if err != nil { + return fmt.Errorf("Error retrieving floating IP pool name: %s", err) + } + if len(poolID) == 0 { + return fmt.Errorf("No network found with name: %s", d.Get("pool").(string)) + } + createOpts := floatingips.CreateOpts{ + FloatingNetworkID: poolID, + } + log.Printf("[DEBUG] Create Options: %#v", createOpts) + floatingIP, err := floatingips.Create(networkClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error allocating floating IP: %s", err) + } + + d.SetId(floatingIP.ID) + + return resourceNetworkFloatingIPV2Read(d, meta) +} + +func resourceNetworkFloatingIPV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + floatingIP, err := floatingips.Get(networkClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "floating IP") + } + + d.Set("address", floatingIP.FloatingIP) + poolName, err := getNetworkName(d, meta, floatingIP.FloatingNetworkID) + if err != nil { + return fmt.Errorf("Error retrieving floating IP pool name: %s", err) + } + d.Set("pool", poolName) + + return nil +} + +func resourceNetworkFloatingIPV2Delete(d *schema.ResourceData, meta 
interface{}) error { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + err = floatingips.Delete(networkClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting floating IP: %s", err) + } + d.SetId("") + return nil +} + +func getNetworkID(d *schema.ResourceData, meta interface{}, networkName string) (string, error) { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{Name: networkName} + pager := networks.List(networkClient, opts) + networkID := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.Name == networkName { + networkID = n.ID + return false, nil + } + } + + return true, nil + }) + + return networkID, err +} + +func getNetworkName(d *schema.ResourceData, meta interface{}, networkID string) (string, error) { + config := meta.(*Config) + networkClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return "", fmt.Errorf("Error creating OpenStack network client: %s", err) + } + + opts := networks.ListOpts{ID: networkID} + pager := networks.List(networkClient, opts) + networkName := "" + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + networkList, err := networks.ExtractNetworks(page) + if err != nil { + return false, err + } + + for _, n := range networkList { + if n.ID == networkID { + networkName = n.Name + return false, nil + } + } + + return true, nil + }) + + return networkName, err +} diff --git a/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go 
b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go new file mode 100644 index 000000000..5c8ae38e3 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_floatingip_v2_test.go @@ -0,0 +1,91 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" +) + +func TestAccNetworkingV2FloatingIP_basic(t *testing.T) { + var floatingIP floatingips.FloatingIP + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2FloatingIPDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2FloatingIP_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2FloatingIPExists(t, "openstack_networking_floatingip_v2.foo", &floatingIP), + ), + }, + }, + }) +} + +func testAccCheckNetworkingV2FloatingIPDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2FloatingIPDestroy) Error creating OpenStack floating IP: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_networking_floatingip_v2" { + continue + } + + _, err := floatingips.Get(networkClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("FloatingIP still exists") + } + } + + return nil +} + +func testAccCheckNetworkingV2FloatingIPExists(t *testing.T, n string, kp *floatingips.FloatingIP) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := 
testAccProvider.Meta().(*Config) + networkClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2FloatingIPExists) Error creating OpenStack networking client: %s", err) + } + + found, err := floatingips.Get(networkClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("FloatingIP not found") + } + + *kp = *found + + return nil + } +} + +var testAccNetworkingV2FloatingIP_basic = ` + resource "openstack_networking_floatingip_v2" "foo" { + } + + resource "openstack_compute_instance_v2" "bar" { + name = "terraform-acc-floating-ip-test" + floating_ip = "${openstack_networking_floatingip_v2.foo.address}" + }` diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2.go b/builtin/providers/openstack/resource_openstack_networking_network_v2.go new file mode 100644 index 000000000..2ac4ab94e --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_network_v2.go @@ -0,0 +1,170 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" +) + +func resourceNetworkingNetworkV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingNetworkV2Create, + Read: resourceNetworkingNetworkV2Read, + Update: resourceNetworkingNetworkV2Update, + Delete: resourceNetworkingNetworkV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "shared": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + 
"tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingNetworkV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := networks.CreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + sharedRaw := d.Get("shared").(string) + if sharedRaw != "" { + shared, err := strconv.ParseBool(sharedRaw) + if err != nil { + return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) + } + createOpts.Shared = &shared + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := networks.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron network: %s", err) + } + log.Printf("[INFO] Network ID: %s", n.ID) + + d.SetId(n.ID) + + return resourceNetworkingNetworkV2Read(d, meta) +} + +func resourceNetworkingNetworkV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := networks.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "network") + } + + log.Printf("[DEBUG] Retreived Network %s: %+v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp)) + d.Set("shared", strconv.FormatBool(n.Shared)) + 
d.Set("tenant_id", n.TenantID) + + return nil +} + +func resourceNetworkingNetworkV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts networks.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + if d.HasChange("shared") { + sharedRaw := d.Get("shared").(string) + if sharedRaw != "" { + shared, err := strconv.ParseBool(sharedRaw) + if err != nil { + return fmt.Errorf("shared, if provided, must be either 'true' or 'false': %v", err) + } + updateOpts.Shared = &shared + } + } + + log.Printf("[DEBUG] Updating Network %s with options: %+v", d.Id(), updateOpts) + + _, err = networks.Update(networkingClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Network: %s", err) + } + + return resourceNetworkingNetworkV2Read(d, meta) +} + +func resourceNetworkingNetworkV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = networks.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Network: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go 
new file mode 100644 index 000000000..5bff60532 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_network_v2_test.go @@ -0,0 +1,104 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/networks" +) + +func TestAccNetworkingV2Network_basic(t *testing.T) { + var network networks.Network + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2NetworkDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Network_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2NetworkExists(t, "openstack_networking_network_v2.foo", &network), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Network_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_networking_network_v2.foo", "name", "network_2"), + ), + }, + }, + }) +} + +func testAccCheckNetworkingV2NetworkDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2NetworkDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_networking_network_v2" { + continue + } + + _, err := networks.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Network still exists") + } + } + + return nil +} + +func testAccCheckNetworkingV2NetworkExists(t *testing.T, n string, network *networks.Network) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + 
return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2NetworkExists) Error creating OpenStack networking client: %s", err) + } + + found, err := networks.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Network not found") + } + + *network = *found + + return nil + } +} + +var testAccNetworkingV2Network_basic = fmt.Sprintf(` + resource "openstack_networking_network_v2" "foo" { + region = "%s" + name = "network_1" + admin_state_up = "true" + }`, + OS_REGION_NAME) + +var testAccNetworkingV2Network_update = fmt.Sprintf(` + resource "openstack_networking_network_v2" "foo" { + region = "%s" + name = "network_2" + admin_state_up = "true" + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go new file mode 100644 index 000000000..e67ff6f58 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2.go @@ -0,0 +1,107 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" +) + +func resourceNetworkingRouterInterfaceV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterInterfaceV2Create, + Read: resourceNetworkingRouterInterfaceV2Read, + Delete: resourceNetworkingRouterInterfaceV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "router_id": 
&schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "subnet_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterInterfaceV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := routers.InterfaceOpts{ + SubnetID: d.Get("subnet_id").(string), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := routers.AddInterface(networkingClient, d.Get("router_id").(string), createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron router interface: %s", err) + } + log.Printf("[INFO] Router interface Port ID: %s", n.PortID) + + d.SetId(n.PortID) + + return resourceNetworkingRouterInterfaceV2Read(d, meta) +} + +func resourceNetworkingRouterInterfaceV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := ports.Get(networkingClient, d.Id()).Extract() + if err != nil { + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err) + } + + if httpError.Actual == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving OpenStack Neutron Router Interface: %s", err) + } + + log.Printf("[DEBUG] Retreived Router Interface %s: %+v", d.Id(), n) + + return nil +} + +func resourceNetworkingRouterInterfaceV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != 
nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + removeOpts := routers.InterfaceOpts{ + SubnetID: d.Get("subnet_id").(string), + } + + _, err = routers.RemoveInterface(networkingClient, d.Get("router_id").(string), removeOpts).Extract() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Router Interface: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go new file mode 100644 index 000000000..be3b12c0b --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_router_interface_v2_test.go @@ -0,0 +1,100 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/ports" +) + +func TestAccNetworkingV2RouterInterface_basic(t *testing.T) { + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2RouterInterfaceDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2RouterInterface_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2RouterInterfaceExists(t, "openstack_networking_router_interface_v2.int_1"), + ), + }, + }, + }) +} + +func testAccCheckNetworkingV2RouterInterfaceDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2RouterInterfaceDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_networking_router_interface_v2" { + continue + } + + _, err := 
ports.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Router interface still exists") + } + } + + return nil +} + +func testAccCheckNetworkingV2RouterInterfaceExists(t *testing.T, n string) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2RouterInterfaceExists) Error creating OpenStack networking client: %s", err) + } + + found, err := ports.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Router interface not found") + } + + return nil + } +} + +var testAccNetworkingV2RouterInterface_basic = fmt.Sprintf(` +resource "openstack_networking_router_v2" "router_1" { + name = "router_1" + admin_state_up = "true" +} + +resource "openstack_networking_router_interface_v2" "int_1" { + subnet_id = "${openstack_networking_subnet_v2.subnet_1.id}" + router_id = "${openstack_networking_router_v2.router_1.id}" +} + +resource "openstack_networking_network_v2" "network_1" { + name = "network_1" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "subnet_1" { + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 +}`) diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2.go b/builtin/providers/openstack/resource_openstack_networking_router_v2.go new file mode 100644 index 000000000..3b6df4816 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_router_v2.go @@ -0,0 +1,169 @@ +package openstack + +import ( + "fmt" + "log" + "strconv" + + 
"github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers" +) + +func resourceNetworkingRouterV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingRouterV2Create, + Read: resourceNetworkingRouterV2Read, + Update: resourceNetworkingRouterV2Update, + Delete: resourceNetworkingRouterV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "admin_state_up": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "external_gateway": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + }, + } +} + +func resourceNetworkingRouterV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := routers.CreateOpts{ + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + } + + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + createOpts.AdminStateUp = &asu + } + + externalGateway := d.Get("external_gateway").(string) + if externalGateway != "" { + gatewayInfo := routers.GatewayInfo{ + NetworkID: externalGateway, + } + createOpts.GatewayInfo = &gatewayInfo + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + n, err := routers.Create(networkingClient, 
createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron router: %s", err) + } + log.Printf("[INFO] Router ID: %s", n.ID) + + d.SetId(n.ID) + + return resourceNetworkingRouterV2Read(d, meta) +} + +func resourceNetworkingRouterV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + n, err := routers.Get(networkingClient, d.Id()).Extract() + if err != nil { + httpError, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + if httpError.Actual == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("Error retrieving OpenStack Neutron Router: %s", err) + } + + log.Printf("[DEBUG] Retreived Router %s: %+v", d.Id(), n) + + d.Set("name", n.Name) + d.Set("admin_state_up", strconv.FormatBool(n.AdminStateUp)) + d.Set("tenant_id", n.TenantID) + d.Set("external_gateway", n.GatewayInfo.NetworkID) + + return nil +} + +func resourceNetworkingRouterV2Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + var updateOpts routers.UpdateOpts + if d.HasChange("name") { + updateOpts.Name = d.Get("name").(string) + } + if d.HasChange("admin_state_up") { + asuRaw := d.Get("admin_state_up").(string) + if asuRaw != "" { + asu, err := strconv.ParseBool(asuRaw) + if err != nil { + return fmt.Errorf("admin_state_up, if provided, must be either 'true' or 'false'") + } + updateOpts.AdminStateUp = &asu + } + } + + log.Printf("[DEBUG] Updating Router %s with options: %+v", d.Id(), updateOpts) + + _, err = routers.Update(networkingClient, d.Id(), 
updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack Neutron Router: %s", err) + } + + return resourceNetworkingRouterV2Read(d, meta) +} + +func resourceNetworkingRouterV2Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + err = routers.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Router: %s", err) + } + + d.SetId("") + return nil +} diff --git a/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go new file mode 100644 index 000000000..248f4e721 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_router_v2_test.go @@ -0,0 +1,100 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/layer3/routers" +) + +func TestAccNetworkingV2Router_basic(t *testing.T) { + var router routers.Router + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckNetworkingV2RouterDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Router_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2RouterExists(t, "openstack_networking_router_v2.foo", &router), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Router_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_networking_router_v2.foo", "name", "router_2"), + ), + }, + }, + }) +} + +func testAccCheckNetworkingV2RouterDestroy(s *terraform.State) 
error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2RouterDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_networking_router_v2" { + continue + } + + _, err := routers.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Router still exists") + } + } + + return nil +} + +func testAccCheckNetworkingV2RouterExists(t *testing.T, n string, router *routers.Router) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2RouterExists) Error creating OpenStack networking client: %s", err) + } + + found, err := routers.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Router not found") + } + + *router = *found + + return nil + } +} + +var testAccNetworkingV2Router_basic = fmt.Sprintf(` + resource "openstack_networking_router_v2" "foo" { + name = "router" + admin_state_up = "true" + }`) + +var testAccNetworkingV2Router_update = fmt.Sprintf(` + resource "openstack_networking_router_v2" "foo" { + name = "router_2" + admin_state_up = "true" + }`) diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go new file mode 100644 index 000000000..2f898bb4c --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_subnet_v2.go @@ -0,0 +1,272 @@ +package openstack + +import ( 
+ "fmt" + "log" + "strconv" + + "github.com/hashicorp/terraform/helper/hashcode" + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" +) + +func resourceNetworkingSubnetV2() *schema.Resource { + return &schema.Resource{ + Create: resourceNetworkingSubnetV2Create, + Read: resourceNetworkingSubnetV2Read, + Update: resourceNetworkingSubnetV2Update, + Delete: resourceNetworkingSubnetV2Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "network_id": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "tenant_id": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: true, + }, + "allocation_pools": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: true, + Elem: &schema.Resource{ + Schema: map[string]*schema.Schema{ + "start": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "end": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + "gateway_ip": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "ip_version": &schema.Schema{ + Type: schema.TypeInt, + Required: true, + ForceNew: true, + }, + "enable_dhcp": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "dns_nameservers": &schema.Schema{ + Type: schema.TypeSet, + Optional: true, + ForceNew: false, + Elem: &schema.Schema{Type: schema.TypeString}, + Set: func(v interface{}) int { + return hashcode.String(v.(string)) + }, + }, + "host_routes": &schema.Schema{ + Type: schema.TypeList, + Optional: true, + ForceNew: false, + Elem: 
&schema.Resource{ + Schema: map[string]*schema.Schema{ + "destination_cidr": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + "next_hop": &schema.Schema{ + Type: schema.TypeString, + Required: true, + }, + }, + }, + }, + }, + } +} + +func resourceNetworkingSubnetV2Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + createOpts := subnets.CreateOpts{ + NetworkID: d.Get("network_id").(string), + CIDR: d.Get("cidr").(string), + Name: d.Get("name").(string), + TenantID: d.Get("tenant_id").(string), + AllocationPools: resourceSubnetAllocationPoolsV2(d), + GatewayIP: d.Get("gateway_ip").(string), + IPVersion: d.Get("ip_version").(int), + DNSNameservers: resourceSubnetDNSNameserversV2(d), + HostRoutes: resourceSubnetHostRoutesV2(d), + } + + edRaw := d.Get("enable_dhcp").(string) + if edRaw != "" { + ed, err := strconv.ParseBool(edRaw) + if err != nil { + return fmt.Errorf("enable_dhcp, if provided, must be either 'true' or 'false'") + } + createOpts.EnableDHCP = &ed + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + s, err := subnets.Create(networkingClient, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack Neutron subnet: %s", err) + } + log.Printf("[INFO] Subnet ID: %s", s.ID) + + d.SetId(s.ID) + + return resourceNetworkingSubnetV2Read(d, meta) +} + +func resourceNetworkingSubnetV2Read(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + networkingClient, err := config.networkingV2Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack networking client: %s", err) + } + + s, err := subnets.Get(networkingClient, d.Id()).Extract() + if err != nil { + return CheckDeleted(d, err, "subnet") + } + + log.Printf("[DEBUG] Retreived Subnet 
%s: %+v", d.Id(), s)
+
+	d.Set("network_id", s.NetworkID)
+	d.Set("cidr", s.CIDR)
+	d.Set("ip_version", s.IPVersion)
+	d.Set("name", s.Name)
+	d.Set("tenant_id", s.TenantID)
+	d.Set("allocation_pools", s.AllocationPools)
+	d.Set("gateway_ip", s.GatewayIP)
+	d.Set("enable_dhcp", strconv.FormatBool(s.EnableDHCP))
+	d.Set("dns_nameservers", s.DNSNameservers)
+	d.Set("host_routes", s.HostRoutes)
+
+	return nil
+}
+
+func resourceNetworkingSubnetV2Update(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
+	var updateOpts subnets.UpdateOpts
+
+	if d.HasChange("name") {
+		updateOpts.Name = d.Get("name").(string)
+	}
+
+	if d.HasChange("gateway_ip") {
+		updateOpts.GatewayIP = d.Get("gateway_ip").(string)
+	}
+
+	if d.HasChange("dns_nameservers") {
+		updateOpts.DNSNameservers = resourceSubnetDNSNameserversV2(d)
+	}
+
+	if d.HasChange("host_routes") {
+		updateOpts.HostRoutes = resourceSubnetHostRoutesV2(d)
+	}
+
+	if d.HasChange("enable_dhcp") {
+		edRaw := d.Get("enable_dhcp").(string)
+		if edRaw != "" {
+			ed, err := strconv.ParseBool(edRaw)
+			if err != nil {
+				return fmt.Errorf("enable_dhcp, if provided, must be either 'true' or 'false'")
+			}
+			updateOpts.EnableDHCP = &ed
+		}
+	}
+
+	log.Printf("[DEBUG] Updating Subnet %s with options: %+v", d.Id(), updateOpts)
+
+	_, err = subnets.Update(networkingClient, d.Id(), updateOpts).Extract()
+	if err != nil {
+		return fmt.Errorf("Error updating OpenStack Neutron Subnet: %s", err)
+	}
+
+	return resourceNetworkingSubnetV2Read(d, meta)
+}
+
+func resourceNetworkingSubnetV2Delete(d *schema.ResourceData, meta interface{}) error {
+	config := meta.(*Config)
+	networkingClient, err := config.networkingV2Client(d.Get("region").(string))
+	if err != nil {
+		return fmt.Errorf("Error creating OpenStack networking client: %s", err)
+	}
+
err = subnets.Delete(networkingClient, d.Id()).ExtractErr() + if err != nil { + return fmt.Errorf("Error deleting OpenStack Neutron Subnet: %s", err) + } + + d.SetId("") + return nil +} + +func resourceSubnetAllocationPoolsV2(d *schema.ResourceData) []subnets.AllocationPool { + rawAPs := d.Get("allocation_pools").([]interface{}) + aps := make([]subnets.AllocationPool, len(rawAPs)) + for i, raw := range rawAPs { + rawMap := raw.(map[string]interface{}) + aps[i] = subnets.AllocationPool{ + Start: rawMap["start"].(string), + End: rawMap["end"].(string), + } + } + return aps +} + +func resourceSubnetDNSNameserversV2(d *schema.ResourceData) []string { + rawDNSN := d.Get("dns_nameservers").(*schema.Set) + dnsn := make([]string, rawDNSN.Len()) + for i, raw := range rawDNSN.List() { + dnsn[i] = raw.(string) + } + return dnsn +} + +func resourceSubnetHostRoutesV2(d *schema.ResourceData) []subnets.HostRoute { + rawHR := d.Get("host_routes").([]interface{}) + hr := make([]subnets.HostRoute, len(rawHR)) + for i, raw := range rawHR { + rawMap := raw.(map[string]interface{}) + hr[i] = subnets.HostRoute{ + DestinationCIDR: rawMap["destination_cidr"].(string), + NextHop: rawMap["next_hop"].(string), + } + } + return hr +} diff --git a/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go b/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go new file mode 100644 index 000000000..d7f6116e9 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_networking_subnet_v2_test.go @@ -0,0 +1,119 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + + "github.com/rackspace/gophercloud/openstack/networking/v2/subnets" +) + +func TestAccNetworkingV2Subnet_basic(t *testing.T) { + var subnet subnets.Subnet + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: 
testAccCheckNetworkingV2SubnetDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccNetworkingV2Subnet_basic, + Check: resource.ComposeTestCheckFunc( + testAccCheckNetworkingV2SubnetExists(t, "openstack_networking_subnet_v2.subnet_1", &subnet), + ), + }, + resource.TestStep{ + Config: testAccNetworkingV2Subnet_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_networking_subnet_v2.subnet_1", "name", "tf-test-subnet"), + resource.TestCheckResourceAttr("openstack_networking_subnet_v2.subnet_1", "gateway_ip", "192.68.0.1"), + ), + }, + }, + }) +} + +func testAccCheckNetworkingV2SubnetDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2SubnetDestroy) Error creating OpenStack networking client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_networking_subnet_v2" { + continue + } + + _, err := subnets.Get(networkingClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Subnet still exists") + } + } + + return nil +} + +func testAccCheckNetworkingV2SubnetExists(t *testing.T, n string, subnet *subnets.Subnet) resource.TestCheckFunc { + return func(s *terraform.State) error { + rs, ok := s.RootModule().Resources[n] + if !ok { + return fmt.Errorf("Not found: %s", n) + } + + if rs.Primary.ID == "" { + return fmt.Errorf("No ID is set") + } + + config := testAccProvider.Meta().(*Config) + networkingClient, err := config.networkingV2Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("(testAccCheckNetworkingV2SubnetExists) Error creating OpenStack networking client: %s", err) + } + + found, err := subnets.Get(networkingClient, rs.Primary.ID).Extract() + if err != nil { + return err + } + + if found.ID != rs.Primary.ID { + return fmt.Errorf("Subnet not found") + } + + *subnet = *found + + 
return nil + } +} + +var testAccNetworkingV2Subnet_basic = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + }`, OS_REGION_NAME, OS_REGION_NAME) + +var testAccNetworkingV2Subnet_update = fmt.Sprintf(` + resource "openstack_networking_network_v2" "network_1" { + region = "%s" + name = "network_1" + admin_state_up = "true" + } + + resource "openstack_networking_subnet_v2" "subnet_1" { + region = "%s" + name = "tf-test-subnet" + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 + gateway_ip = "192.68.0.1" + }`, OS_REGION_NAME, OS_REGION_NAME) diff --git a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go new file mode 100644 index 000000000..31666a356 --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1.go @@ -0,0 +1,148 @@ +package openstack + +import ( + "fmt" + "log" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" +) + +func resourceObjectStorageContainerV1() *schema.Resource { + return &schema.Resource{ + Create: resourceObjectStorageContainerV1Create, + Read: resourceObjectStorageContainerV1Read, + Update: resourceObjectStorageContainerV1Update, + Delete: resourceObjectStorageContainerV1Delete, + + Schema: map[string]*schema.Schema{ + "region": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: true, + DefaultFunc: envDefaultFunc("OS_REGION_NAME"), + }, + "name": &schema.Schema{ + Type: schema.TypeString, + Required: true, + ForceNew: false, + }, + "container_read": &schema.Schema{ + Type: 
schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_to": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_sync_key": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "container_write": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "content_type": &schema.Schema{ + Type: schema.TypeString, + Optional: true, + ForceNew: false, + }, + "metadata": &schema.Schema{ + Type: schema.TypeMap, + Optional: true, + ForceNew: false, + }, + }, + } +} + +func resourceObjectStorageContainerV1Create(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + cn := d.Get("name").(string) + + createOpts := &containers.CreateOpts{ + ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + Metadata: resourceContainerMetadataV2(d), + } + + log.Printf("[DEBUG] Create Options: %#v", createOpts) + _, err = containers.Create(objectStorageClient, cn, createOpts).Extract() + if err != nil { + return fmt.Errorf("Error creating OpenStack container: %s", err) + } + log.Printf("[INFO] Container ID: %s", cn) + + // Store the ID now + d.SetId(cn) + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Read(d *schema.ResourceData, meta interface{}) error { + return nil +} + +func resourceObjectStorageContainerV1Update(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(d.Get("region").(string)) + if err != nil { + 
return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + updateOpts := containers.UpdateOpts{ + ContainerRead: d.Get("container_read").(string), + ContainerSyncTo: d.Get("container_sync_to").(string), + ContainerSyncKey: d.Get("container_sync_key").(string), + ContainerWrite: d.Get("container_write").(string), + ContentType: d.Get("content_type").(string), + } + + if d.HasChange("metadata") { + updateOpts.Metadata = resourceContainerMetadataV2(d) + } + + _, err = containers.Update(objectStorageClient, d.Id(), updateOpts).Extract() + if err != nil { + return fmt.Errorf("Error updating OpenStack container: %s", err) + } + + return resourceObjectStorageContainerV1Read(d, meta) +} + +func resourceObjectStorageContainerV1Delete(d *schema.ResourceData, meta interface{}) error { + config := meta.(*Config) + objectStorageClient, err := config.objectStorageV1Client(d.Get("region").(string)) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + _, err = containers.Delete(objectStorageClient, d.Id()).Extract() + if err != nil { + return fmt.Errorf("Error deleting OpenStack container: %s", err) + } + + d.SetId("") + return nil +} + +func resourceContainerMetadataV2(d *schema.ResourceData) map[string]string { + m := make(map[string]string) + for key, val := range d.Get("metadata").(map[string]interface{}) { + m[key] = val.(string) + } + return m +} diff --git a/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go new file mode 100644 index 000000000..9377ad2fb --- /dev/null +++ b/builtin/providers/openstack/resource_openstack_objectstorage_container_v1_test.go @@ -0,0 +1,77 @@ +package openstack + +import ( + "fmt" + "testing" + + "github.com/hashicorp/terraform/helper/resource" + "github.com/hashicorp/terraform/terraform" + 
"github.com/rackspace/gophercloud/openstack/objectstorage/v1/containers" +) + +func TestAccObjectStorageV1Container_basic(t *testing.T) { + + resource.Test(t, resource.TestCase{ + PreCheck: func() { testAccPreCheck(t) }, + Providers: testAccProviders, + CheckDestroy: testAccCheckObjectStorageV1ContainerDestroy, + Steps: []resource.TestStep{ + resource.TestStep{ + Config: testAccObjectStorageV1Container_basic, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_objectstorage_container_v1.container_1", "name", "tf-test-container"), + resource.TestCheckResourceAttr("openstack_objectstorage_container_v1.container_1", "content_type", "application/json"), + ), + }, + resource.TestStep{ + Config: testAccObjectStorageV1Container_update, + Check: resource.ComposeTestCheckFunc( + resource.TestCheckResourceAttr("openstack_objectstorage_container_v1.container_1", "content_type", "text/plain"), + ), + }, + }, + }) +} + +func testAccCheckObjectStorageV1ContainerDestroy(s *terraform.State) error { + config := testAccProvider.Meta().(*Config) + objectStorageClient, err := config.objectStorageV1Client(OS_REGION_NAME) + if err != nil { + return fmt.Errorf("Error creating OpenStack object storage client: %s", err) + } + + for _, rs := range s.RootModule().Resources { + if rs.Type != "openstack_objectstorage_container_v1" { + continue + } + + _, err := containers.Get(objectStorageClient, rs.Primary.ID).Extract() + if err == nil { + return fmt.Errorf("Container still exists") + } + } + + return nil +} + +var testAccObjectStorageV1Container_basic = fmt.Sprintf(` + resource "openstack_objectstorage_container_v1" "container_1" { + region = "%s" + name = "tf-test-container" + metadata { + test = "true" + } + content_type = "application/json" + }`, + OS_REGION_NAME) + +var testAccObjectStorageV1Container_update = fmt.Sprintf(` + resource "openstack_objectstorage_container_v1" "container_1" { + region = "%s" + name = "tf-test-container" + metadata { + test = 
"true" + } + content_type = "text/plain" + }`, + OS_REGION_NAME) diff --git a/builtin/providers/openstack/util.go b/builtin/providers/openstack/util.go new file mode 100644 index 000000000..93a8bfbc5 --- /dev/null +++ b/builtin/providers/openstack/util.go @@ -0,0 +1,22 @@ +package openstack + +import ( + "fmt" + + "github.com/hashicorp/terraform/helper/schema" + "github.com/rackspace/gophercloud" +) + +// CheckDeleted checks the error to see if it's a 404 (Not Found) and, if so, +// sets the resource ID to the empty string instead of throwing an error. +func CheckDeleted(d *schema.ResourceData, err error, msg string) error { + errCode, ok := err.(*gophercloud.UnexpectedResponseCodeError) + if !ok { + return fmt.Errorf("%s: %s", msg, err) + } + if errCode.Actual == 404 { + d.SetId("") + return nil + } + return fmt.Errorf("%s: %s", msg, err) +} diff --git a/command/apply.go b/command/apply.go index d46b71679..529d6e701 100644 --- a/command/apply.go +++ b/command/apply.go @@ -93,6 +93,7 @@ func (c *ApplyCommand) Run(args []string) int { // Build the context based on the arguments given ctx, planned, err := c.Context(contextOpts{ + Destroy: c.Destroy, Path: configPath, StatePath: c.Meta.statePath, }) @@ -140,12 +141,7 @@ func (c *ApplyCommand) Run(args []string) int { } } - var opts terraform.PlanOpts - if c.Destroy { - opts.Destroy = true - } - - if _, err := ctx.Plan(&opts); err != nil { + if _, err := ctx.Plan(); err != nil { c.Ui.Error(fmt.Sprintf( "Error creating plan: %s", err)) return 1 @@ -319,6 +315,10 @@ Options: "-state". This can be used to preserve the old state. + -target=resource Resource to target. Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + -var 'foo=bar' Set a variable in the Terraform configuration. This flag can be set multiple times. @@ -357,6 +357,10 @@ Options: "-state". This can be used to preserve the old state. + -target=resource Resource to target. 
Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + -var 'foo=bar' Set a variable in the Terraform configuration. This flag can be set multiple times. diff --git a/command/apply_destroy_test.go b/command/apply_destroy_test.go index bdc2440f0..63afb15ed 100644 --- a/command/apply_destroy_test.go +++ b/command/apply_destroy_test.go @@ -116,6 +116,96 @@ func TestApply_destroyPlan(t *testing.T) { } } +func TestApply_destroyTargeted(t *testing.T) { + originalState := &terraform.State{ + Modules: []*terraform.ModuleState{ + &terraform.ModuleState{ + Path: []string{"root"}, + Resources: map[string]*terraform.ResourceState{ + "test_instance.foo": &terraform.ResourceState{ + Type: "test_instance", + Primary: &terraform.InstanceState{ + ID: "i-ab123", + }, + }, + "test_load_balancer.foo": &terraform.ResourceState{ + Type: "test_load_balancer", + Primary: &terraform.InstanceState{ + ID: "lb-abc123", + }, + }, + }, + }, + }, + } + + statePath := testStateFile(t, originalState) + + p := testProvider() + ui := new(cli.MockUi) + c := &ApplyCommand{ + Destroy: true, + Meta: Meta{ + ContextOpts: testCtxConfig(p), + Ui: ui, + }, + } + + // Run the apply command pointing to our existing state + args := []string{ + "-force", + "-target", "test_instance.foo", + "-state", statePath, + testFixturePath("apply-destroy-targeted"), + } + if code := c.Run(args); code != 0 { + t.Fatalf("bad: %d\n\n%s", code, ui.ErrorWriter.String()) + } + + // Verify a new state exists + if _, err := os.Stat(statePath); err != nil { + t.Fatalf("err: %s", err) + } + + f, err := os.Open(statePath) + if err != nil { + t.Fatalf("err: %s", err) + } + defer f.Close() + + state, err := terraform.ReadState(f) + if err != nil { + t.Fatalf("err: %s", err) + } + if state == nil { + t.Fatal("state should not be nil") + } + + actualStr := strings.TrimSpace(state.String()) + expectedStr := strings.TrimSpace(testApplyDestroyStr) + if actualStr != expectedStr { + 
t.Fatalf("bad:\n\n%s\n\n%s", actualStr, expectedStr) + } + + // Should have a backup file + f, err = os.Open(statePath + DefaultBackupExtention) + if err != nil { + t.Fatalf("err: %s", err) + } + + backupState, err := terraform.ReadState(f) + f.Close() + if err != nil { + t.Fatalf("err: %s", err) + } + + actualStr = strings.TrimSpace(backupState.String()) + expectedStr = strings.TrimSpace(originalState.String()) + if actualStr != expectedStr { + t.Fatalf("bad:\n\nactual:\n%s\n\nexpected:\n%s", actualStr, expectedStr) + } +} + const testApplyDestroyStr = ` ` diff --git a/command/flag_kv.go b/command/flag_kv.go index fd9b57b3a..6e0198778 100644 --- a/command/flag_kv.go +++ b/command/flag_kv.go @@ -85,3 +85,17 @@ func loadKVFile(rawPath string) (map[string]string, error) { return result, nil } + +// FlagStringSlice is a flag.Value implementation for parsing targets from the +// command line, e.g. -target=aws_instance.foo -target=aws_vpc.bar + +type FlagStringSlice []string + +func (v *FlagStringSlice) String() string { + return "" +} +func (v *FlagStringSlice) Set(raw string) error { + *v = append(*v, raw) + + return nil +} diff --git a/command/meta.go b/command/meta.go index 28a01c542..b542304af 100644 --- a/command/meta.go +++ b/command/meta.go @@ -38,6 +38,9 @@ type Meta struct { input bool variables map[string]string + // Targets for this context (private) + targets []string + color bool oldUi cli.Ui @@ -126,6 +129,9 @@ func (m *Meta) Context(copts contextOpts) (*terraform.Context, bool, error) { m.statePath = copts.StatePath } + // Tell the context if we're in a destroy plan / apply + opts.Destroy = copts.Destroy + // Store the loaded state state, err := m.State() if err != nil { @@ -267,6 +273,7 @@ func (m *Meta) contextOpts() *terraform.ContextOpts { vs[k] = v } opts.Variables = vs + opts.Targets = m.targets opts.UIInput = m.UIInput() return &opts @@ -278,6 +285,7 @@ func (m *Meta) flagSet(n string) *flag.FlagSet { f.BoolVar(&m.input, "input", true, "input") 
f.Var((*FlagKV)(&m.variables), "var", "variables") f.Var((*FlagKVFile)(&m.variables), "var-file", "variable file") + f.Var((*FlagStringSlice)(&m.targets), "target", "resource to target") if m.autoKey != "" { f.Var((*FlagKVFile)(&m.autoVariables), m.autoKey, "variable file") @@ -388,4 +396,7 @@ type contextOpts struct { // GetMode is the module.GetMode to use when loading the module tree. GetMode module.GetMode + + // Set to true when running a destroy plan/apply. + Destroy bool } diff --git a/command/plan.go b/command/plan.go index 24365d185..5c884d632 100644 --- a/command/plan.go +++ b/command/plan.go @@ -53,6 +53,7 @@ func (c *PlanCommand) Run(args []string) int { } ctx, _, err := c.Context(contextOpts{ + Destroy: destroy, Path: path, StatePath: c.Meta.statePath, }) @@ -86,7 +87,7 @@ func (c *PlanCommand) Run(args []string) int { } } - plan, err := ctx.Plan(&terraform.PlanOpts{Destroy: destroy}) + plan, err := ctx.Plan() if err != nil { c.Ui.Error(fmt.Sprintf("Error running plan: %s", err)) return 1 @@ -168,6 +169,10 @@ Options: up Terraform-managed resources. By default it will use the state "terraform.tfstate" if it exists. + -target=resource Resource to target. Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + -var 'foo=bar' Set a variable in the Terraform configuration. This flag can be set multiple times. diff --git a/command/refresh.go b/command/refresh.go index 38d630050..32e795047 100644 --- a/command/refresh.go +++ b/command/refresh.go @@ -135,6 +135,10 @@ Options: -state-out=path Path to write updated state file. By default, the "-state" path will be used. + -target=resource Resource to target. Operation will be limited to this + resource and its dependencies. This flag can be used + multiple times. + -var 'foo=bar' Set a variable in the Terraform configuration. This flag can be set multiple times. 
diff --git a/command/test-fixtures/apply-destroy-targeted/main.tf b/command/test-fixtures/apply-destroy-targeted/main.tf new file mode 100644 index 000000000..45ebc5b97 --- /dev/null +++ b/command/test-fixtures/apply-destroy-targeted/main.tf @@ -0,0 +1,7 @@ +resource "test_instance" "foo" { + count = 3 +} + +resource "test_load_balancer" "foo" { + instances = ["${test_instance.foo.*.id}"] +} diff --git a/config.go b/config.go index 583d7ddb2..648223888 100644 --- a/config.go +++ b/config.go @@ -179,7 +179,7 @@ func (c *Config) discoverSingle(glob string, m *map[string]string) error { continue } - log.Printf("[DEBUG] Discoverd plugin: %s = %s", parts[2], match) + log.Printf("[DEBUG] Discovered plugin: %s = %s", parts[2], match) (*m)[parts[2]] = match } diff --git a/config/interpolate_funcs.go b/config/interpolate_funcs.go index 8bb76c532..353c45500 100644 --- a/config/interpolate_funcs.go +++ b/config/interpolate_funcs.go @@ -9,6 +9,7 @@ import ( "strings" "github.com/hashicorp/terraform/config/lang/ast" + "github.com/mitchellh/go-homedir" ) // Funcs is the mapping of built-in functions for configuration. @@ -57,7 +58,11 @@ func interpolationFuncFile() ast.Function { ArgTypes: []ast.Type{ast.TypeString}, ReturnType: ast.TypeString, Callback: func(args []interface{}) (interface{}, error) { - data, err := ioutil.ReadFile(args[0].(string)) + path, err := homedir.Expand(args[0].(string)) + if err != nil { + return "", err + } + data, err := ioutil.ReadFile(path) if err != nil { return "", err } diff --git a/dag/dag.go b/dag/dag.go index b81cb2874..0f53fb1f0 100644 --- a/dag/dag.go +++ b/dag/dag.go @@ -17,6 +17,40 @@ type AcyclicGraph struct { // WalkFunc is the callback used for walking the graph. type WalkFunc func(Vertex) error +// Returns a Set that includes every Vertex yielded by walking down from the +// provided starting Vertex v. 
+func (g *AcyclicGraph) Ancestors(v Vertex) (*Set, error) { + s := new(Set) + start := asVertexList(g.DownEdges(v)) + memoFunc := func(v Vertex) error { + s.Add(v) + return nil + } + + if err := g.depthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + +// Returns a Set that includes every Vertex yielded by walking up from the +// provided starting Vertex v. +func (g *AcyclicGraph) Descendents(v Vertex) (*Set, error) { + s := new(Set) + start := asVertexList(g.UpEdges(v)) + memoFunc := func(v Vertex) error { + s.Add(v) + return nil + } + + if err := g.reverseDepthFirstWalk(start, memoFunc); err != nil { + return nil, err + } + + return s, nil +} + // Root returns the root of the DAG, or an error. // // Complexity: O(V) @@ -61,15 +95,11 @@ func (g *AcyclicGraph) TransitiveReduction() { for _, u := range g.Vertices() { uTargets := g.DownEdges(u) - vs := make([]Vertex, uTargets.Len()) - for i, vRaw := range uTargets.List() { - vs[i] = vRaw.(Vertex) - } + vs := asVertexList(g.DownEdges(u)) g.depthFirstWalk(vs, func(v Vertex) error { shared := uTargets.Intersection(g.DownEdges(v)) - for _, raw := range shared.List() { - vPrime := raw.(Vertex) + for _, vPrime := range asVertexList(shared) { g.RemoveEdge(BasicEdge(u, vPrime)) } @@ -145,12 +175,10 @@ func (g *AcyclicGraph) Walk(cb WalkFunc) error { for _, v := range vertices { // Build our list of dependencies and the list of channels to // wait on until we start executing for this vertex. 
- depsRaw := g.DownEdges(v).List() - deps := make([]Vertex, len(depsRaw)) + deps := asVertexList(g.DownEdges(v)) depChs := make([]<-chan struct{}, len(deps)) - for i, raw := range depsRaw { - deps[i] = raw.(Vertex) - depChs[i] = vertMap[deps[i]] + for i, dep := range deps { + depChs[i] = vertMap[dep] } // Get our channel so that we can close it when we're done @@ -200,6 +228,16 @@ func (g *AcyclicGraph) Walk(cb WalkFunc) error { return errs } +// simple convenience helper for converting a dag.Set to a []Vertex +func asVertexList(s *Set) []Vertex { + rawList := s.List() + vertexList := make([]Vertex, len(rawList)) + for i, raw := range rawList { + vertexList[i] = raw.(Vertex) + } + return vertexList +} + // depthFirstWalk does a depth-first walk of the graph starting from // the vertices in start. This is not exported now but it would make sense // to export this publicly at some point. @@ -233,3 +271,36 @@ func (g *AcyclicGraph) depthFirstWalk(start []Vertex, cb WalkFunc) error { return nil } + +// reverseDepthFirstWalk does a depth-first walk _up_ the graph starting from +// the vertices in start. +func (g *AcyclicGraph) reverseDepthFirstWalk(start []Vertex, cb WalkFunc) error { + seen := make(map[Vertex]struct{}) + frontier := make([]Vertex, len(start)) + copy(frontier, start) + for len(frontier) > 0 { + // Pop the current vertex + n := len(frontier) + current := frontier[n-1] + frontier = frontier[:n-1] + + // Check if we've seen this already and return... + if _, ok := seen[current]; ok { + continue + } + seen[current] = struct{}{} + + // Visit the current node + if err := cb(current); err != nil { + return err + } + + // Visit targets of this in reverse order. 
+ targets := g.UpEdges(current).List() + for i := len(targets) - 1; i >= 0; i-- { + frontier = append(frontier, targets[i].(Vertex)) + } + } + + return nil +} diff --git a/dag/dag_test.go b/dag/dag_test.go index feead7968..e7b2db8d2 100644 --- a/dag/dag_test.go +++ b/dag/dag_test.go @@ -126,6 +126,68 @@ func TestAcyclicGraphValidate_cycleSelf(t *testing.T) { } } +func TestAcyclicGraphAncestors(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Add(5) + g.Connect(BasicEdge(0, 1)) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(3, 4)) + g.Connect(BasicEdge(4, 5)) + + actual, err := g.Ancestors(2) + if err != nil { + t.Fatalf("err: %#v", err) + } + + expected := []Vertex{3, 4, 5} + + if actual.Len() != len(expected) { + t.Fatalf("bad length! expected %#v to have len %d", actual, len(expected)) + } + + for _, e := range expected { + if !actual.Include(e) { + t.Fatalf("expected: %#v to include: %#v", expected, actual) + } + } +} + +func TestAcyclicGraphDescendents(t *testing.T) { + var g AcyclicGraph + g.Add(1) + g.Add(2) + g.Add(3) + g.Add(4) + g.Add(5) + g.Connect(BasicEdge(0, 1)) + g.Connect(BasicEdge(1, 2)) + g.Connect(BasicEdge(2, 3)) + g.Connect(BasicEdge(3, 4)) + g.Connect(BasicEdge(4, 5)) + + actual, err := g.Descendents(2) + if err != nil { + t.Fatalf("err: %#v", err) + } + + expected := []Vertex{0, 1} + + if actual.Len() != len(expected) { + t.Fatalf("bad length! 
expected %#v to have len %d", actual, len(expected)) + } + + for _, e := range expected { + if !actual.Include(e) { + t.Fatalf("expected: %#v to include: %#v", expected, actual) + } + } +} + func TestAcyclicGraphWalk(t *testing.T) { var g AcyclicGraph g.Add(1) diff --git a/helper/resource/testing.go b/helper/resource/testing.go index cedadfc72..90cfc175f 100644 --- a/helper/resource/testing.go +++ b/helper/resource/testing.go @@ -190,6 +190,7 @@ func testStep( // Build the context opts.Module = mod opts.State = state + opts.Destroy = step.Destroy ctx := terraform.NewContext(&opts) if ws, es := ctx.Validate(); len(ws) > 0 || len(es) > 0 { estrs := make([]string, len(es)) @@ -209,7 +210,7 @@ func testStep( } // Plan! - if p, err := ctx.Plan(&terraform.PlanOpts{Destroy: step.Destroy}); err != nil { + if p, err := ctx.Plan(); err != nil { return state, fmt.Errorf( "Error planning: %s", err) } else { diff --git a/terraform/context.go b/terraform/context.go index 86a804548..6beaab636 100644 --- a/terraform/context.go +++ b/terraform/context.go @@ -33,6 +33,7 @@ const ( // ContextOpts are the user-configurable options to create a context with // NewContext. type ContextOpts struct { + Destroy bool Diff *Diff Hooks []Hook Module *module.Tree @@ -40,6 +41,7 @@ type ContextOpts struct { State *State Providers map[string]ResourceProviderFactory Provisioners map[string]ResourceProvisionerFactory + Targets []string Variables map[string]string UIInput UIInput @@ -49,6 +51,7 @@ type ContextOpts struct { // perform operations on infrastructure. This structure is built using // NewContext. See the documentation for that. 
type Context struct { + destroy bool diff *Diff diffLock sync.RWMutex hooks []Hook @@ -58,6 +61,7 @@ type Context struct { sh *stopHook state *State stateLock sync.RWMutex + targets []string uiInput UIInput variables map[string]string @@ -95,12 +99,14 @@ func NewContext(opts *ContextOpts) *Context { } return &Context{ + destroy: opts.Destroy, diff: opts.Diff, hooks: hooks, module: opts.Module, providers: opts.Providers, provisioners: opts.Provisioners, state: state, + targets: opts.Targets, uiInput: opts.UIInput, variables: opts.Variables, @@ -135,6 +141,8 @@ func (c *Context) GraphBuilder() GraphBuilder { Providers: providers, Provisioners: provisioners, State: c.state, + Targets: c.targets, + Destroy: c.destroy, } } @@ -253,7 +261,7 @@ func (c *Context) Apply() (*State, error) { // // Plan also updates the diff of this context to be the diff generated // by the plan, so Apply can be called after. -func (c *Context) Plan(opts *PlanOpts) (*Plan, error) { +func (c *Context) Plan() (*Plan, error) { v := c.acquireRun() defer c.releaseRun(v) @@ -264,7 +272,7 @@ func (c *Context) Plan(opts *PlanOpts) (*Plan, error) { } var operation walkOperation - if opts != nil && opts.Destroy { + if c.destroy { operation = walkPlanDestroy } else { // Set our state to be something temporary. 
We do this so that diff --git a/terraform/context_test.go b/terraform/context_test.go index abffbf5c3..b9de8d79d 100644 --- a/terraform/context_test.go +++ b/terraform/context_test.go @@ -24,7 +24,7 @@ func TestContext2Plan(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -57,7 +57,7 @@ func TestContext2Plan_emptyDiff(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -80,7 +80,7 @@ func TestContext2Plan_minimal(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -103,7 +103,7 @@ func TestContext2Plan_modules(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -126,7 +126,7 @@ func TestContext2Plan_moduleInput(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -149,7 +149,7 @@ func TestContext2Plan_moduleInputComputed(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -175,7 +175,7 @@ func TestContext2Plan_moduleInputFromVar(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -198,7 +198,7 @@ func TestContext2Plan_moduleMultiVar(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -237,7 +237,7 @@ func TestContext2Plan_moduleOrphans(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -282,7 +282,7 @@ func TestContext2Plan_moduleProviderInherit(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -332,7 +332,7 @@ func TestContext2Plan_moduleProviderDefaults(t 
*testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -385,7 +385,7 @@ func TestContext2Plan_moduleProviderDefaultsVar(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -410,7 +410,7 @@ func TestContext2Plan_moduleVar(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -433,7 +433,7 @@ func TestContext2Plan_moduleVarComputed(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -471,7 +471,7 @@ func TestContext2Plan_nil(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -491,7 +491,7 @@ func TestContext2Plan_computed(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -514,7 +514,7 @@ func TestContext2Plan_computedList(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -537,7 +537,7 @@ func TestContext2Plan_count(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -564,7 +564,7 @@ func TestContext2Plan_countComputed(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err == nil { t.Fatal("should error") } @@ -581,7 +581,7 @@ func TestContext2Plan_countIndex(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -604,7 +604,7 @@ func TestContext2Plan_countIndexZero(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -630,7 +630,7 @@ func TestContext2Plan_countVar(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { 
t.Fatalf("err: %s", err) } @@ -653,7 +653,7 @@ func TestContext2Plan_countZero(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -676,7 +676,7 @@ func TestContext2Plan_countOneIndex(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -731,7 +731,7 @@ func TestContext2Plan_countDecreaseToOne(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -774,7 +774,7 @@ func TestContext2Plan_countIncreaseFromNotSet(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -817,7 +817,7 @@ func TestContext2Plan_countIncreaseFromOne(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -875,7 +875,7 @@ func TestContext2Plan_countIncreaseFromOneCorrupted(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -917,10 +917,11 @@ func TestContext2Plan_destroy(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - State: s, + State: s, + Destroy: true, }) - plan, err := ctx.Plan(&PlanOpts{Destroy: true}) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -971,10 +972,11 @@ func TestContext2Plan_moduleDestroy(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - State: s, + State: s, + Destroy: true, }) - plan, err := ctx.Plan(&PlanOpts{Destroy: true}) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1020,10 +1022,11 @@ func TestContext2Plan_moduleDestroyMultivar(t *testing.T) { Providers: map[string]ResourceProviderFactory{ "aws": testProviderFuncFixed(p), }, - State: s, + State: s, + Destroy: true, }) - plan, 
err := ctx.Plan(&PlanOpts{Destroy: true}) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1051,7 +1054,7 @@ func TestContext2Plan_pathVar(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1118,7 +1121,7 @@ func TestContext2Plan_diffVar(t *testing.T) { }, nil } - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1143,7 +1146,7 @@ func TestContext2Plan_hook(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1183,7 +1186,7 @@ func TestContext2Plan_orphan(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1221,7 +1224,7 @@ func TestContext2Plan_state(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1273,7 +1276,7 @@ func TestContext2Plan_taint(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1324,7 +1327,7 @@ func TestContext2Plan_multiple_taint(t *testing.T) { State: s, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1336,6 +1339,40 @@ func TestContext2Plan_multiple_taint(t *testing.T) { } } +func TestContext2Plan_targeted(t *testing.T) { + m := testModule(t, "plan-targeted") + p := testProvider("aws") + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Targets: []string{"aws_instance.foo"}, + }) + + plan, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(plan.String()) + expected := strings.TrimSpace(` +DIFF: + +CREATE: aws_instance.foo + num: "" => "2" + type: "" => "aws_instance" + +STATE: + + + 
`) + if actual != expected { + t.Fatalf("expected:\n%s\n\ngot:\n%s", expected, actual) + } +} + func TestContext2Plan_provider(t *testing.T) { m := testModule(t, "plan-provider") p := testProvider("aws") @@ -1357,7 +1394,7 @@ func TestContext2Plan_provider(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -1377,7 +1414,7 @@ func TestContext2Plan_varMultiCountOne(t *testing.T) { }, }) - plan, err := ctx.Plan(nil) + plan, err := ctx.Plan() if err != nil { t.Fatalf("err: %s", err) } @@ -1399,7 +1436,7 @@ func TestContext2Plan_varListErr(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err == nil { t.Fatal("should error") } @@ -1457,6 +1494,141 @@ func TestContext2Refresh(t *testing.T) { } } +func TestContext2Refresh_targeted(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-targeted") + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"), + "aws_instance.notme": resourceState("aws_instance", "i-bcd345"), + "aws_instance.me": resourceState("aws_instance", "i-abc123"), + "aws_elb.meneither": resourceState("aws_elb", "lb-abc123"), + }, + }, + }, + }, + Targets: []string{"aws_instance.me"}, + }) + + refreshedResources := make([]string, 0, 2) + p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) { + refreshedResources = append(refreshedResources, i.Id) + return is, nil + } + + _, err := ctx.Refresh() + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"aws_vpc.metoo", "aws_instance.me"} + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources) + } +} + +func 
TestContext2Refresh_targetedCount(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-targeted-count") + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"), + "aws_instance.notme": resourceState("aws_instance", "i-bcd345"), + "aws_instance.me.0": resourceState("aws_instance", "i-abc123"), + "aws_instance.me.1": resourceState("aws_instance", "i-cde567"), + "aws_instance.me.2": resourceState("aws_instance", "i-cde789"), + "aws_elb.meneither": resourceState("aws_elb", "lb-abc123"), + }, + }, + }, + }, + Targets: []string{"aws_instance.me"}, + }) + + refreshedResources := make([]string, 0, 2) + p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) { + refreshedResources = append(refreshedResources, i.Id) + return is, nil + } + + _, err := ctx.Refresh() + if err != nil { + t.Fatalf("err: %s", err) + } + + // Target didn't specify index, so we should get all our instances + expected := []string{ + "aws_vpc.metoo", + "aws_instance.me.0", + "aws_instance.me.1", + "aws_instance.me.2", + } + sort.Strings(expected) + sort.Strings(refreshedResources) + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources) + } +} + +func TestContext2Refresh_targetedCountIndex(t *testing.T) { + p := testProvider("aws") + m := testModule(t, "refresh-targeted-count") + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_vpc.metoo": resourceState("aws_vpc", "vpc-abc123"), + "aws_instance.notme": 
resourceState("aws_instance", "i-bcd345"), + "aws_instance.me.0": resourceState("aws_instance", "i-abc123"), + "aws_instance.me.1": resourceState("aws_instance", "i-cde567"), + "aws_instance.me.2": resourceState("aws_instance", "i-cde789"), + "aws_elb.meneither": resourceState("aws_elb", "lb-abc123"), + }, + }, + }, + }, + Targets: []string{"aws_instance.me[0]"}, + }) + + refreshedResources := make([]string, 0, 2) + p.RefreshFn = func(i *InstanceInfo, is *InstanceState) (*InstanceState, error) { + refreshedResources = append(refreshedResources, i.Id) + return is, nil + } + + _, err := ctx.Refresh() + if err != nil { + t.Fatalf("err: %s", err) + } + + expected := []string{"aws_vpc.metoo", "aws_instance.me.0"} + if !reflect.DeepEqual(refreshedResources, expected) { + t.Fatalf("expected: %#v, got: %#v", expected, refreshedResources) + } +} + func TestContext2Refresh_delete(t *testing.T) { p := testProvider("aws") m := testModule(t, "refresh-basic") @@ -2468,7 +2640,7 @@ func TestContext2Input(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2513,7 +2685,7 @@ func TestContext2Input_provider(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2590,7 +2762,7 @@ func TestContext2Input_providerId(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2638,7 +2810,7 @@ func TestContext2Input_providerOnly(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2693,7 +2865,7 @@ func TestContext2Input_providerVars(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ 
-2741,7 +2913,7 @@ func TestContext2Input_varOnly(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2787,7 +2959,7 @@ func TestContext2Input_varOnlyUnset(t *testing.T) { t.Fatalf("err: %s", err) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2815,7 +2987,7 @@ func TestContext2Apply(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2848,7 +3020,7 @@ func TestContext2Apply_emptyModule(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -2896,7 +3068,7 @@ func TestContext2Apply_createBeforeDestroy(t *testing.T) { State: state, }) - if p, err := ctx.Plan(nil); err != nil { + if p, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } else { t.Logf(p.String()) @@ -2950,7 +3122,7 @@ func TestContext2Apply_createBeforeDestroyUpdate(t *testing.T) { State: state, }) - if p, err := ctx.Plan(nil); err != nil { + if p, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } else { t.Logf(p.String()) @@ -2985,7 +3157,7 @@ func TestContext2Apply_minimal(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3013,7 +3185,7 @@ func TestContext2Apply_badDiff(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3071,7 +3243,7 @@ func TestContext2Apply_cancel(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3112,7 +3284,7 @@ func TestContext2Apply_compute(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ 
-3181,7 +3353,7 @@ func TestContext2Apply_countDecrease(t *testing.T) { State: s, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3241,7 +3413,7 @@ func TestContext2Apply_countDecreaseToOne(t *testing.T) { State: s, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3303,7 +3475,7 @@ func TestContext2Apply_countDecreaseToOneCorrupted(t *testing.T) { State: s, }) - if p, err := ctx.Plan(nil); err != nil { + if p, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } else { testStringMatch(t, p, testTerraformApplyCountDecToOneCorruptedPlanStr) @@ -3354,7 +3526,7 @@ func TestContext2Apply_countTainted(t *testing.T) { State: s, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3382,7 +3554,7 @@ func TestContext2Apply_countVariable(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3410,7 +3582,7 @@ func TestContext2Apply_module(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3439,9 +3611,10 @@ func TestContext2Apply_moduleVarResourceCount(t *testing.T) { Variables: map[string]string{ "count": "2", }, + Destroy: true, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3459,7 +3632,7 @@ func TestContext2Apply_moduleVarResourceCount(t *testing.T) { }, }) - if _, err := ctx.Plan(&PlanOpts{Destroy: true}); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3481,7 +3654,7 @@ func TestContext2Apply_moduleBool(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3515,7 +3688,7 @@ func TestContext2Apply_multiProvider(t 
*testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3548,7 +3721,7 @@ func TestContext2Apply_nilDiff(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3588,7 +3761,7 @@ func TestContext2Apply_Provisioner_compute(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3633,7 +3806,7 @@ func TestContext2Apply_provisionerCreateFail(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3672,7 +3845,7 @@ func TestContext2Apply_provisionerCreateFailNoId(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3712,7 +3885,7 @@ func TestContext2Apply_provisionerFail(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3767,7 +3940,7 @@ func TestContext2Apply_provisionerFail_createBeforeDestroy(t *testing.T) { State: state, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3816,7 +3989,7 @@ func TestContext2Apply_error_createBeforeDestroy(t *testing.T) { } p.DiffFn = testDiffFn - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3874,7 +4047,7 @@ func TestContext2Apply_errorDestroy_createBeforeDestroy(t *testing.T) { } p.DiffFn = testDiffFn - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3931,7 +4104,7 @@ func TestContext2Apply_multiDepose_createBeforeDestroy(t *testing.T) { } } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3955,7 
+4128,7 @@ aws_instance.web: (1 deposed) State: state, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -3983,7 +4156,7 @@ aws_instance.web: (2 deposed) } createdInstanceId = "qux" - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } state, err = ctx.Apply() @@ -4005,7 +4178,7 @@ aws_instance.web: (1 deposed) } createdInstanceId = "quux" - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } state, err = ctx.Apply() @@ -4045,7 +4218,7 @@ func TestContext2Apply_provisionerResourceRef(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4091,7 +4264,7 @@ func TestContext2Apply_provisionerSelfRef(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4144,7 +4317,7 @@ func TestContext2Apply_provisionerMultiSelfRef(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4192,7 +4365,7 @@ func TestContext2Apply_Provisioner_Diff(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4229,7 +4402,7 @@ func TestContext2Apply_Provisioner_Diff(t *testing.T) { State: state, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4300,7 +4473,7 @@ func TestContext2Apply_outputDiffVars(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } if _, err := ctx.Apply(); err != nil { @@ -4363,7 +4536,7 @@ func TestContext2Apply_Provisioner_ConnInfo(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { 
t.Fatalf("err: %s", err) } @@ -4399,22 +4572,32 @@ func TestContext2Apply_destroy(t *testing.T) { }) // First plan and apply a create operation - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } - if _, err := ctx.Apply(); err != nil { + state, err := ctx.Apply() + if err != nil { t.Fatalf("err: %s", err) } // Next, plan and apply a destroy operation - if _, err := ctx.Plan(&PlanOpts{Destroy: true}); err != nil { + h.Active = true + ctx = testContext2(t, &ContextOpts{ + Destroy: true, + State: state, + Module: m, + Hooks: []Hook{h}, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } - h.Active = true - - state, err := ctx.Apply() + state, err = ctx.Apply() if err != nil { t.Fatalf("err: %s", err) } @@ -4430,7 +4613,7 @@ func TestContext2Apply_destroy(t *testing.T) { expected2 := []string{"aws_instance.bar", "aws_instance.foo"} actual2 := h.IDs if !reflect.DeepEqual(actual2, expected2) { - t.Fatalf("bad: %#v", actual2) + t.Fatalf("expected: %#v\n\ngot:%#v", expected2, actual2) } } @@ -4449,22 +4632,33 @@ func TestContext2Apply_destroyOutputs(t *testing.T) { }) // First plan and apply a create operation - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } - if _, err := ctx.Apply(); err != nil { + state, err := ctx.Apply() + + if err != nil { t.Fatalf("err: %s", err) } // Next, plan and apply a destroy operation - if _, err := ctx.Plan(&PlanOpts{Destroy: true}); err != nil { + h.Active = true + ctx = testContext2(t, &ContextOpts{ + Destroy: true, + State: state, + Module: m, + Hooks: []Hook{h}, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + }) + + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } - h.Active = true - - state, err := ctx.Apply() + state, err = ctx.Apply() if err != nil { 
t.Fatalf("err: %s", err) } @@ -4520,7 +4714,7 @@ func TestContext2Apply_destroyOrphan(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4577,10 +4771,11 @@ func TestContext2Apply_destroyTaintedProvisioner(t *testing.T) { Provisioners: map[string]ResourceProvisionerFactory{ "shell": testProvisionerFuncFixed(pr), }, - State: s, + State: s, + Destroy: true, }) - if _, err := ctx.Plan(&PlanOpts{Destroy: true}); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4638,7 +4833,7 @@ func TestContext2Apply_error(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4705,7 +4900,7 @@ func TestContext2Apply_errorPartial(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4740,7 +4935,7 @@ func TestContext2Apply_hook(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4788,7 +4983,7 @@ func TestContext2Apply_idAttr(t *testing.T) { }, nil } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4822,7 +5017,7 @@ func TestContext2Apply_output(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4850,7 +5045,7 @@ func TestContext2Apply_outputInvalid(t *testing.T) { }, }) - _, err := ctx.Plan(nil) + _, err := ctx.Plan() if err == nil { t.Fatalf("err: %s", err) } @@ -4871,7 +5066,7 @@ func TestContext2Apply_outputList(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4899,7 +5094,7 @@ func TestContext2Apply_outputMulti(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if 
_, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4927,7 +5122,7 @@ func TestContext2Apply_outputMultiIndex(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -4992,7 +5187,7 @@ func TestContext2Apply_taint(t *testing.T) { State: s, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -5057,7 +5252,7 @@ func TestContext2Apply_taintDep(t *testing.T) { State: s, }) - if p, err := ctx.Plan(nil); err != nil { + if p, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } else { t.Logf("plan: %s", p) @@ -5120,7 +5315,7 @@ func TestContext2Apply_taintDepRequiresNew(t *testing.T) { State: s, }) - if p, err := ctx.Plan(nil); err != nil { + if p, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } else { t.Logf("plan: %s", p) @@ -5138,6 +5333,199 @@ func TestContext2Apply_taintDepRequiresNew(t *testing.T) { } } +func TestContext2Apply_targeted(t *testing.T) { + m := testModule(t, "apply-targeted") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Targets: []string{"aws_instance.foo"}, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + mod := state.RootModule() + if len(mod.Resources) != 1 { + t.Fatalf("expected 1 resource, got: %#v", mod.Resources) + } + + checkStateString(t, state, ` +aws_instance.foo: + ID = foo + num = 2 + type = aws_instance + `) +} + +func TestContext2Apply_targetedCount(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + 
"aws": testProviderFuncFixed(p), + }, + Targets: []string{"aws_instance.foo"}, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + checkStateString(t, state, ` +aws_instance.foo.0: + ID = foo +aws_instance.foo.1: + ID = foo +aws_instance.foo.2: + ID = foo + `) +} + +func TestContext2Apply_targetedCountIndex(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + Targets: []string{"aws_instance.foo[1]"}, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + checkStateString(t, state, ` +aws_instance.foo.1: + ID = foo + `) +} + +func TestContext2Apply_targetedDestroy(t *testing.T) { + m := testModule(t, "apply-targeted") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo": resourceState("aws_instance", "i-bcd345"), + "aws_instance.bar": resourceState("aws_instance", "i-abc123"), + }, + }, + }, + }, + Targets: []string{"aws_instance.foo"}, + Destroy: true, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + mod := state.RootModule() + if len(mod.Resources) != 1 { + t.Fatalf("expected 1 resource, got: %#v", mod.Resources) + } + + checkStateString(t, state, ` +aws_instance.bar: + ID = i-abc123 + `) +} + +func 
TestContext2Apply_targetedDestroyCountIndex(t *testing.T) { + m := testModule(t, "apply-targeted-count") + p := testProvider("aws") + p.ApplyFn = testApplyFn + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + Providers: map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + State: &State{ + Modules: []*ModuleState{ + &ModuleState{ + Path: rootModulePath, + Resources: map[string]*ResourceState{ + "aws_instance.foo.0": resourceState("aws_instance", "i-bcd345"), + "aws_instance.foo.1": resourceState("aws_instance", "i-bcd345"), + "aws_instance.foo.2": resourceState("aws_instance", "i-bcd345"), + "aws_instance.bar.0": resourceState("aws_instance", "i-abc123"), + "aws_instance.bar.1": resourceState("aws_instance", "i-abc123"), + "aws_instance.bar.2": resourceState("aws_instance", "i-abc123"), + }, + }, + }, + }, + Targets: []string{ + "aws_instance.foo[2]", + "aws_instance.bar[1]", + }, + Destroy: true, + }) + + if _, err := ctx.Plan(); err != nil { + t.Fatalf("err: %s", err) + } + + state, err := ctx.Apply() + if err != nil { + t.Fatalf("err: %s", err) + } + + checkStateString(t, state, ` +aws_instance.bar.0: + ID = i-abc123 +aws_instance.bar.2: + ID = i-abc123 +aws_instance.foo.0: + ID = i-bcd345 +aws_instance.foo.1: + ID = i-bcd345 + `) +} + func TestContext2Apply_unknownAttribute(t *testing.T) { m := testModule(t, "apply-unknown") p := testProvider("aws") @@ -5150,7 +5538,7 @@ func TestContext2Apply_unknownAttribute(t *testing.T) { }, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -5190,7 +5578,7 @@ func TestContext2Apply_vars(t *testing.T) { t.Fatalf("bad: %s", e) } - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -5248,7 +5636,7 @@ func TestContext2Apply_createBefore_depends(t *testing.T) { State: state, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != 
nil { t.Fatalf("err: %s", err) } @@ -5357,7 +5745,7 @@ func TestContext2Apply_singleDestroy(t *testing.T) { State: state, }) - if _, err := ctx.Plan(nil); err != nil { + if _, err := ctx.Plan(); err != nil { t.Fatalf("err: %s", err) } @@ -5527,6 +5915,15 @@ func checkStateString(t *testing.T, state *State, expected string) { } } +func resourceState(resourceType, resourceID string) *ResourceState { + return &ResourceState{ + Type: resourceType, + Primary: &InstanceState{ + ID: resourceID, + }, + } +} + const testContextGraph = ` root: root aws_instance.bar diff --git a/terraform/graph_builder.go b/terraform/graph_builder.go index 4d5726954..03c59f958 100644 --- a/terraform/graph_builder.go +++ b/terraform/graph_builder.go @@ -65,6 +65,13 @@ type BuiltinGraphBuilder struct { // Provisioners is the list of provisioners supported. Provisioners []string + + // Targets is the user-specified list of resources to target. + Targets []string + + // Destroy is set to true when we're in a `terraform destroy` or a + // `terraform plan -destroy` + Destroy bool } // Build builds the graph according to the steps returned by Steps. @@ -82,7 +89,11 @@ func (b *BuiltinGraphBuilder) Steps() []GraphTransformer { return []GraphTransformer{ // Create all our resources from the configuration and state &ConfigTransformer{Module: b.Root}, - &OrphanTransformer{State: b.State, Module: b.Root}, + &OrphanTransformer{ + State: b.State, + Module: b.Root, + Targeting: (len(b.Targets) > 0), + }, // Provider-related transformations &MissingProviderTransformer{Providers: b.Providers}, @@ -104,6 +115,10 @@ func (b *BuiltinGraphBuilder) Steps() []GraphTransformer { }, }, + // Optionally reduces the graph to a user-specified list of targets and + // their dependencies. 
+ &TargetsTransformer{Targets: b.Targets, Destroy: b.Destroy}, + // Create the destruction nodes &DestroyTransformer{}, &CreateBeforeDestroyTransformer{}, diff --git a/terraform/graph_config_node.go b/terraform/graph_config_node.go index 625992f3f..ddb96da2c 100644 --- a/terraform/graph_config_node.go +++ b/terraform/graph_config_node.go @@ -21,6 +21,26 @@ type graphNodeConfig interface { GraphNodeDependent } +// GraphNodeAddressable is an interface that all graph nodes for the +// configuration graph need to implement in order to be addressed / targeted +// properly. +type GraphNodeAddressable interface { + graphNodeConfig + + ResourceAddress() *ResourceAddress +} + +// GraphNodeTargetable is an interface for graph nodes to implement when they +// need to be told about incoming targets. This is useful for nodes that need +// to respect targets as they dynamically expand. Note that the list of targets +// provided will contain every target provided, and each implementing graph +// node must filter this list to targets considered relevant. +type GraphNodeTargetable interface { + GraphNodeAddressable + + SetTargets([]ResourceAddress) +} + // GraphNodeConfigModule represents a module within the configuration graph. type GraphNodeConfigModule struct { Path []string @@ -191,6 +211,9 @@ type GraphNodeConfigResource struct { // If this is set to anything other than destroyModeNone, then this // resource represents a resource that will be destroyed in some way. 
DestroyMode GraphNodeDestroyMode + + // Used during DynamicExpand to target indexes + Targets []ResourceAddress } func (n *GraphNodeConfigResource) DependableName() []string { @@ -279,6 +302,7 @@ func (n *GraphNodeConfigResource) DynamicExpand(ctx EvalContext) (*Graph, error) steps = append(steps, &ResourceCountTransformer{ Resource: n.Resource, Destroy: n.DestroyMode != DestroyNone, + Targets: n.Targets, }) } @@ -289,8 +313,9 @@ func (n *GraphNodeConfigResource) DynamicExpand(ctx EvalContext) (*Graph, error) // expand orphans, which have all the same semantics in a destroy // as a primary. steps = append(steps, &OrphanTransformer{ - State: state, - View: n.Resource.Id(), + State: state, + View: n.Resource.Id(), + Targeting: (len(n.Targets) > 0), }) steps = append(steps, &DeposedTransformer{ @@ -314,6 +339,22 @@ func (n *GraphNodeConfigResource) DynamicExpand(ctx EvalContext) (*Graph, error) return b.Build(ctx.Path()) } +// GraphNodeAddressable impl. +func (n *GraphNodeConfigResource) ResourceAddress() *ResourceAddress { + return &ResourceAddress{ + // Indicates no specific index; will match on other three fields + Index: -1, + InstanceType: TypePrimary, + Name: n.Resource.Name, + Type: n.Resource.Type, + } +} + +// GraphNodeTargetable impl. +func (n *GraphNodeConfigResource) SetTargets(targets []ResourceAddress) { + n.Targets = targets +} + // GraphNodeEvalable impl. 
func (n *GraphNodeConfigResource) EvalTree() EvalNode { return &EvalSequence{ diff --git a/terraform/instancetype.go b/terraform/instancetype.go new file mode 100644 index 000000000..08959717b --- /dev/null +++ b/terraform/instancetype.go @@ -0,0 +1,13 @@ +package terraform + +//go:generate stringer -type=InstanceType instancetype.go + +// InstanceType is an enum of the various types of instances stored in the State +type InstanceType int + +const ( + TypeInvalid InstanceType = iota + TypePrimary + TypeTainted + TypeDeposed +) diff --git a/terraform/instancetype_string.go b/terraform/instancetype_string.go new file mode 100644 index 000000000..fc8697644 --- /dev/null +++ b/terraform/instancetype_string.go @@ -0,0 +1,16 @@ +// generated by stringer -type=InstanceType instancetype.go; DO NOT EDIT + +package terraform + +import "fmt" + +const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" + +var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} + +func (i InstanceType) String() string { + if i < 0 || i+1 >= InstanceType(len(_InstanceType_index)) { + return fmt.Sprintf("InstanceType(%d)", i) + } + return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] +} diff --git a/terraform/plan.go b/terraform/plan.go index e73fde383..715136edc 100644 --- a/terraform/plan.go +++ b/terraform/plan.go @@ -18,15 +18,6 @@ func init() { gob.Register(make(map[string]string)) } -// PlanOpts are the options used to generate an execution plan for -// Terraform. -type PlanOpts struct { - // If set to true, then the generated plan will destroy all resources - // that are created. Otherwise, it will move towards the desired state - // specified in the configuration. - Destroy bool -} - // Plan represents a single Terraform execution plan, which contains // all the information necessary to make an infrastructure change. 
type Plan struct { diff --git a/terraform/resource_address.go b/terraform/resource_address.go new file mode 100644 index 000000000..b54a923d8 --- /dev/null +++ b/terraform/resource_address.go @@ -0,0 +1,98 @@ +package terraform + +import ( + "fmt" + "regexp" + "strconv" +) + +// ResourceAddress is a way of identifying an individual resource (or, +// eventually, a subset of resources) within the state. It is used for Targets. +type ResourceAddress struct { + Index int + InstanceType InstanceType + Name string + Type string +} + +func ParseResourceAddress(s string) (*ResourceAddress, error) { + matches, err := tokenizeResourceAddress(s) + if err != nil { + return nil, err + } + resourceIndex := -1 + if matches["index"] != "" { + var err error + if resourceIndex, err = strconv.Atoi(matches["index"]); err != nil { + return nil, err + } + } + instanceType := TypePrimary + if matches["instance_type"] != "" { + var err error + if instanceType, err = ParseInstanceType(matches["instance_type"]); err != nil { + return nil, err + } + } + + return &ResourceAddress{ + Index: resourceIndex, + InstanceType: instanceType, + Name: matches["name"], + Type: matches["type"], + }, nil +} + +func (addr *ResourceAddress) Equals(raw interface{}) bool { + other, ok := raw.(*ResourceAddress) + if !ok { + return false + } + + indexMatch := (addr.Index == -1 || + other.Index == -1 || + addr.Index == other.Index) + + return (indexMatch && + addr.InstanceType == other.InstanceType && + addr.Name == other.Name && + addr.Type == other.Type) +} + +func ParseInstanceType(s string) (InstanceType, error) { + switch s { + case "primary": + return TypePrimary, nil + case "deposed": + return TypeDeposed, nil + case "tainted": + return TypeTainted, nil + default: + return TypeInvalid, fmt.Errorf("Unexpected value for InstanceType field: %q", s) + } +} + +func tokenizeResourceAddress(s string) (map[string]string, error) { + // Example of portions of the regexp below using the + // string 
"aws_instance.web.tainted[1]" + re := regexp.MustCompile(`\A` + + // "aws_instance" + `(?P\w+)\.` + + // "web" + `(?P\w+)` + + // "tainted" (optional, omission implies: "primary") + `(?:\.(?P\w+))?` + + // "1" (optional, omission implies: "0") + `(?:\[(?P\d+)\])?` + + `\z`) + groupNames := re.SubexpNames() + rawMatches := re.FindAllStringSubmatch(s, -1) + if len(rawMatches) != 1 { + return nil, fmt.Errorf("Problem parsing address: %q", s) + } + matches := make(map[string]string) + for i, m := range rawMatches[0] { + matches[groupNames[i]] = m + } + return matches, nil +} diff --git a/terraform/resource_address_test.go b/terraform/resource_address_test.go new file mode 100644 index 000000000..2a8caa1f8 --- /dev/null +++ b/terraform/resource_address_test.go @@ -0,0 +1,207 @@ +package terraform + +import ( + "reflect" + "testing" +) + +func TestParseResourceAddress(t *testing.T) { + cases := map[string]struct { + Input string + Expected *ResourceAddress + }{ + "implicit primary, no specific index": { + Input: "aws_instance.foo", + Expected: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + }, + "implicit primary, explicit index": { + Input: "aws_instance.foo[2]", + Expected: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + }, + "explicit primary, explicit index": { + Input: "aws_instance.foo.primary[2]", + Expected: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 2, + }, + }, + "tainted": { + Input: "aws_instance.foo.tainted", + Expected: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + Index: -1, + }, + }, + "deposed": { + Input: "aws_instance.foo.deposed", + Expected: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypeDeposed, + Index: -1, + }, + }, + } + + for tn, tc := range cases { + out, err := ParseResourceAddress(tc.Input) + if err != nil { 
+ t.Fatalf("unexpected err: %#v", err) + } + + if !reflect.DeepEqual(out, tc.Expected) { + t.Fatalf("bad: %q\n\nexpected:\n%#v\n\ngot:\n%#v", tn, tc.Expected, out) + } + } +} + +func TestResourceAddressEquals(t *testing.T) { + cases := map[string]struct { + Address *ResourceAddress + Other interface{} + Expect bool + }{ + "basic match": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: true, + }, + "address does not set index": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 3, + }, + Expect: true, + }, + "other does not set index": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 3, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "neither sets index": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: -1, + }, + Expect: true, + }, + "different type": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Type: "aws_vpc", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different name": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "bar", + InstanceType: TypePrimary, + Index: 0, + }, + Expect: false, + }, + "different instance type": { + 
Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypeTainted, + Index: 0, + }, + Expect: false, + }, + "different index": { + Address: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 0, + }, + Other: &ResourceAddress{ + Type: "aws_instance", + Name: "foo", + InstanceType: TypePrimary, + Index: 1, + }, + Expect: false, + }, + } + + for tn, tc := range cases { + actual := tc.Address.Equals(tc.Other) + if actual != tc.Expect { + t.Fatalf("%q: expected equals: %t, got %t for:\n%#v\n%#v", + tn, tc.Expect, actual, tc.Address, tc.Other) + } + } +} diff --git a/terraform/test-fixtures/apply-targeted-count/main.tf b/terraform/test-fixtures/apply-targeted-count/main.tf new file mode 100644 index 000000000..cd861898f --- /dev/null +++ b/terraform/test-fixtures/apply-targeted-count/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + count = 3 +} + +resource "aws_instance" "bar" { + count = 3 +} diff --git a/terraform/test-fixtures/apply-targeted/main.tf b/terraform/test-fixtures/apply-targeted/main.tf new file mode 100644 index 000000000..b07fc97f4 --- /dev/null +++ b/terraform/test-fixtures/apply-targeted/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "bar" +} diff --git a/terraform/test-fixtures/plan-targeted/main.tf b/terraform/test-fixtures/plan-targeted/main.tf new file mode 100644 index 000000000..1b6cdae67 --- /dev/null +++ b/terraform/test-fixtures/plan-targeted/main.tf @@ -0,0 +1,7 @@ +resource "aws_instance" "foo" { + num = "2" +} + +resource "aws_instance" "bar" { + foo = "${aws_instance.foo.num}" +} diff --git a/terraform/test-fixtures/refresh-targeted-count/main.tf b/terraform/test-fixtures/refresh-targeted-count/main.tf new file mode 100644 index 000000000..f564b629c --- /dev/null +++ 
b/terraform/test-fixtures/refresh-targeted-count/main.tf @@ -0,0 +1,9 @@ +resource "aws_vpc" "metoo" {} +resource "aws_instance" "notme" { } +resource "aws_instance" "me" { + vpc_id = "${aws_vpc.metoo.id}" + count = 3 +} +resource "aws_elb" "meneither" { + instances = ["${aws_instance.me.*.id}"] +} diff --git a/terraform/test-fixtures/refresh-targeted/main.tf b/terraform/test-fixtures/refresh-targeted/main.tf new file mode 100644 index 000000000..3a7618464 --- /dev/null +++ b/terraform/test-fixtures/refresh-targeted/main.tf @@ -0,0 +1,8 @@ +resource "aws_vpc" "metoo" {} +resource "aws_instance" "notme" { } +resource "aws_instance" "me" { + vpc_id = "${aws_vpc.metoo.id}" +} +resource "aws_elb" "meneither" { + instances = ["${aws_instance.me.*.id}"] +} diff --git a/terraform/test-fixtures/transform-targets-basic/main.tf b/terraform/test-fixtures/transform-targets-basic/main.tf new file mode 100644 index 000000000..b845a1de6 --- /dev/null +++ b/terraform/test-fixtures/transform-targets-basic/main.tf @@ -0,0 +1,16 @@ +resource "aws_vpc" "me" {} + +resource "aws_subnet" "me" { + vpc_id = "${aws_vpc.me.id}" +} + +resource "aws_instance" "me" { + subnet_id = "${aws_subnet.me.id}" +} + +resource "aws_vpc" "notme" {} +resource "aws_subnet" "notme" {} +resource "aws_instance" "notme" {} +resource "aws_instance" "notmeeither" { + name = "${aws_instance.me.id}" +} diff --git a/terraform/test-fixtures/transform-targets-destroy/main.tf b/terraform/test-fixtures/transform-targets-destroy/main.tf new file mode 100644 index 000000000..da99de43c --- /dev/null +++ b/terraform/test-fixtures/transform-targets-destroy/main.tf @@ -0,0 +1,18 @@ +resource "aws_vpc" "notme" {} + +resource "aws_subnet" "notme" { + vpc_id = "${aws_vpc.notme.id}" +} + +resource "aws_instance" "me" { + subnet_id = "${aws_subnet.notme.id}" +} + +resource "aws_instance" "notme" {} +resource "aws_instance" "metoo" { + name = "${aws_instance.me.id}" +} + +resource "aws_elb" "me" { + instances = 
"${aws_instance.me.*.id}" +} diff --git a/terraform/transform_orphan.go b/terraform/transform_orphan.go index e2a9c7dcd..5de64c65c 100644 --- a/terraform/transform_orphan.go +++ b/terraform/transform_orphan.go @@ -2,6 +2,7 @@ package terraform import ( "fmt" + "log" "github.com/hashicorp/terraform/config" "github.com/hashicorp/terraform/config/module" @@ -25,6 +26,11 @@ type OrphanTransformer struct { // using the graph path. Module *module.Tree + // Targets are user-specified resources to target. We need to be aware of + // these so we don't improperly identify orphans when they've just been + // filtered out of the graph via targeting. + Targeting bool + // View, if non-nil will set a view on the module state. View string } @@ -35,6 +41,13 @@ func (t *OrphanTransformer) Transform(g *Graph) error { return nil } + if t.Targeting { + log.Printf("Skipping orphan transformer because we have targets.") + // If we are in a run where we are targeting nodes, we won't process + // orphans for this run. + return nil + } + // Build up all our state representatives resourceRep := make(map[string]struct{}) for _, v := range g.Vertices() { diff --git a/terraform/transform_resource.go b/terraform/transform_resource.go index 8c2a00c78..21774e953 100644 --- a/terraform/transform_resource.go +++ b/terraform/transform_resource.go @@ -12,6 +12,7 @@ import ( type ResourceCountTransformer struct { Resource *config.Resource Destroy bool + Targets []ResourceAddress } func (t *ResourceCountTransformer) Transform(g *Graph) error { @@ -27,7 +28,7 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error { } // For each count, build and add the node - nodes := make([]dag.Vertex, count) + nodes := make([]dag.Vertex, 0, count) for i := 0; i < count; i++ { // Set the index. If our count is 1 we special case it so that // we handle the "resource.0" and "resource" boundary properly. 
@@ -49,9 +50,14 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error { } } + // Skip nodes if targeting excludes them + if !t.nodeIsTargeted(node) { + continue + } + // Add the node now - nodes[i] = node - g.Add(nodes[i]) + nodes = append(nodes, node) + g.Add(node) } // Make the dependency connections @@ -64,6 +70,25 @@ func (t *ResourceCountTransformer) Transform(g *Graph) error { return nil } +func (t *ResourceCountTransformer) nodeIsTargeted(node dag.Vertex) bool { + // no targets specified, everything stays in the graph + if len(t.Targets) == 0 { + return true + } + addressable, ok := node.(GraphNodeAddressable) + if !ok { + return false + } + + addr := addressable.ResourceAddress() + for _, targetAddr := range t.Targets { + if targetAddr.Equals(addr) { + return true + } + } + return false +} + type graphNodeExpandedResource struct { Index int Resource *config.Resource @@ -77,6 +102,23 @@ func (n *graphNodeExpandedResource) Name() string { return fmt.Sprintf("%s #%d", n.Resource.Id(), n.Index) } +// GraphNodeAddressable impl. +func (n *graphNodeExpandedResource) ResourceAddress() *ResourceAddress { + // We want this to report the logical index properly, so we must undo the + // special case from the expand + index := n.Index + if index == -1 { + index = 0 + } + return &ResourceAddress{ + Index: index, + // TODO: kjkjkj + InstanceType: TypePrimary, + Name: n.Resource.Name, + Type: n.Resource.Type, + } +} + // GraphNodeDependable impl. func (n *graphNodeExpandedResource) DependableName() []string { return []string{ diff --git a/terraform/transform_targets.go b/terraform/transform_targets.go new file mode 100644 index 000000000..29a6d53c6 --- /dev/null +++ b/terraform/transform_targets.go @@ -0,0 +1,103 @@ +package terraform + +import "github.com/hashicorp/terraform/dag" + +// TargetsTransformer is a GraphTransformer that, when the user specifies a +// list of resources to target, limits the graph to only those resources and +// their dependencies. 
+type TargetsTransformer struct { + // List of targeted resource names specified by the user + Targets []string + + // Set to true when we're in a `terraform destroy` or a + // `terraform plan -destroy` + Destroy bool +} + +func (t *TargetsTransformer) Transform(g *Graph) error { + if len(t.Targets) > 0 { + // TODO: duplicated in OrphanTransformer; pull up parsing earlier + addrs, err := t.parseTargetAddresses() + if err != nil { + return err + } + + targetedNodes, err := t.selectTargetedNodes(g, addrs) + if err != nil { + return err + } + + for _, v := range g.Vertices() { + if targetedNodes.Include(v) { + } else { + g.Remove(v) + } + } + } + return nil +} + +func (t *TargetsTransformer) parseTargetAddresses() ([]ResourceAddress, error) { + addrs := make([]ResourceAddress, len(t.Targets)) + for i, target := range t.Targets { + ta, err := ParseResourceAddress(target) + if err != nil { + return nil, err + } + addrs[i] = *ta + } + return addrs, nil +} + +func (t *TargetsTransformer) selectTargetedNodes( + g *Graph, addrs []ResourceAddress) (*dag.Set, error) { + targetedNodes := new(dag.Set) + for _, v := range g.Vertices() { + // Keep all providers; they'll be pruned later if necessary + if r, ok := v.(GraphNodeProvider); ok { + targetedNodes.Add(r) + continue + } + + // For the remaining filter, we only care about addressable nodes + r, ok := v.(GraphNodeAddressable) + if !ok { + continue + } + + if t.nodeIsTarget(r, addrs) { + targetedNodes.Add(r) + // If the node would like to know about targets, tell it. 
+ if n, ok := r.(GraphNodeTargetable); ok { + n.SetTargets(addrs) + } + + var deps *dag.Set + var err error + if t.Destroy { + deps, err = g.Descendents(r) + } else { + deps, err = g.Ancestors(r) + } + if err != nil { + return nil, err + } + + for _, d := range deps.List() { + targetedNodes.Add(d) + } + } + } + return targetedNodes, nil +} + +func (t *TargetsTransformer) nodeIsTarget( + r GraphNodeAddressable, addrs []ResourceAddress) bool { + addr := r.ResourceAddress() + for _, targetAddr := range addrs { + if targetAddr.Equals(addr) { + return true + } + } + return false +} diff --git a/terraform/transform_targets_test.go b/terraform/transform_targets_test.go new file mode 100644 index 000000000..2daa72827 --- /dev/null +++ b/terraform/transform_targets_test.go @@ -0,0 +1,71 @@ +package terraform + +import ( + "strings" + "testing" +) + +func TestTargetsTransformer(t *testing.T) { + mod := testModule(t, "transform-targets-basic") + + g := Graph{Path: RootModulePath} + { + tf := &ConfigTransformer{Module: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &TargetsTransformer{Targets: []string{"aws_instance.me"}} + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +aws_instance.me + aws_subnet.me +aws_subnet.me + aws_vpc.me +aws_vpc.me + `) + if actual != expected { + t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) + } +} + +func TestTargetsTransformer_destroy(t *testing.T) { + mod := testModule(t, "transform-targets-destroy") + + g := Graph{Path: RootModulePath} + { + tf := &ConfigTransformer{Module: mod} + if err := tf.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + { + transform := &TargetsTransformer{ + Targets: []string{"aws_instance.me"}, + Destroy: true, + } + if err := transform.Transform(&g); err != nil { + t.Fatalf("err: %s", err) + } + } + + actual := 
strings.TrimSpace(g.String()) + expected := strings.TrimSpace(` +aws_elb.me + aws_instance.me +aws_instance.me +aws_instance.metoo + aws_instance.me + `) + if actual != expected { + t.Fatalf("bad:\n\nexpected:\n%s\n\ngot:\n%s\n", expected, actual) + } +} diff --git a/website/source/assets/stylesheets/_docs.scss b/website/source/assets/stylesheets/_docs.scss index f144f813a..27cbba873 100755 --- a/website/source/assets/stylesheets/_docs.scss +++ b/website/source/assets/stylesheets/_docs.scss @@ -10,6 +10,7 @@ body.layout-atlas, body.layout-consul, body.layout-dnsimple, body.layout-dme, +body.layout-docker, body.layout-cloudflare, body.layout-cloudstack, body.layout-google, diff --git a/website/source/docs/commands/apply.html.markdown b/website/source/docs/commands/apply.html.markdown index c7e2c27da..9bb5acdbf 100644 --- a/website/source/docs/commands/apply.html.markdown +++ b/website/source/docs/commands/apply.html.markdown @@ -44,6 +44,11 @@ The command-line flags are all optional. The list of available flags are: * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. +* `-target=resource` - A [Resource + Address](/docs/internals/resource-addressing.html) to target. Operation will + be limited to this resource and its dependencies. This flag can be used + multiple times. + * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. diff --git a/website/source/docs/commands/destroy.html.markdown b/website/source/docs/commands/destroy.html.markdown index 4ea84f880..0a0f3a738 100644 --- a/website/source/docs/commands/destroy.html.markdown +++ b/website/source/docs/commands/destroy.html.markdown @@ -21,3 +21,9 @@ confirmation before destroying. This command accepts all the flags that the [apply command](/docs/commands/apply.html) accepts. If `-force` is set, then the destroy confirmation will not be shown. 
+ +The `-target` flag, instead of affecting "dependencies" will instead also +destroy any resources that _depend on_ the target(s) specified. + +The behavior of any `terraform destroy` command can be previewed at any time +with an equivalent `terraform plan -destroy` command. diff --git a/website/source/docs/commands/plan.html.markdown b/website/source/docs/commands/plan.html.markdown index 14c10c5da..e05c460ce 100644 --- a/website/source/docs/commands/plan.html.markdown +++ b/website/source/docs/commands/plan.html.markdown @@ -45,6 +45,11 @@ The command-line flags are all optional. The list of available flags are: * `-state=path` - Path to the state file. Defaults to "terraform.tfstate". +* `-target=resource` - A [Resource + Address](/docs/internals/resource-addressing.html) to target. Operation will + be limited to this resource and its dependencies. This flag can be used + multiple times. + * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. diff --git a/website/source/docs/commands/refresh.html.markdown b/website/source/docs/commands/refresh.html.markdown index cc797ca38..0fc3fc938 100644 --- a/website/source/docs/commands/refresh.html.markdown +++ b/website/source/docs/commands/refresh.html.markdown @@ -36,6 +36,11 @@ The command-line flags are all optional. The list of available flags are: * `-state-out=path` - Path to write updated state file. By default, the `-state` path will be used. +* `-target=resource` - A [Resource + Address](/docs/internals/resource-addressing.html) to target. Operation will + be limited to this resource and its dependencies. This flag can be used + multiple times. + * `-var 'foo=bar'` - Set a variable in the Terraform configuration. This flag can be set multiple times. 
diff --git a/website/source/docs/internals/resource-addressing.html.markdown b/website/source/docs/internals/resource-addressing.html.markdown new file mode 100644 index 000000000..b4b994a88 --- /dev/null +++ b/website/source/docs/internals/resource-addressing.html.markdown @@ -0,0 +1,57 @@ +--- +layout: "docs" +page_title: "Internals: Resource Address" +sidebar_current: "docs-internals-resource-addressing" +description: |- + Resource addressing is used to target specific resources in a larger + infrastructure. +--- + +# Resource Addressing + +A __Resource Address__ is a string that references a specific resource in a +larger infrastructure. The syntax of a resource address is: + +``` +.[optional fields] +``` + +Required fields: + + * `resource_type` - Type of the resource being addressed. + * `resource_name` - User-defined name of the resource. + +Optional fields may include: + + * `[N]` - where `N` is a `0`-based index into a resource with multiple + instances specified by the `count` meta-parameter. Omitting an index when + addressing a resource where `count > 1` means that the address references + all instances. + + +## Examples + +Given a Terraform config that includes: + +``` +resource "aws_instance" "web" { + # ... + count = 4 +} +``` + +An address like this: + + +``` +aws_instance.web[3] +``` + +Refers to only the last instance in the config, and an address like this: + +``` +aws_instance.web +``` + + +Refers to all four "web" instances. diff --git a/website/source/docs/providers/docker/index.html.markdown b/website/source/docs/providers/docker/index.html.markdown new file mode 100644 index 000000000..9a1f4abe1 --- /dev/null +++ b/website/source/docs/providers/docker/index.html.markdown @@ -0,0 +1,53 @@ +--- +layout: "docker" +page_title: "Provider: Docker" +sidebar_current: "docs-docker-index" +description: |- + The Docker provider is used to interact with Docker containers and images. 
+--- + +# Docker Provider + +The Docker provider is used to interact with Docker containers and images. +It uses the Docker API to manage the lifecycle of Docker containers. Because +the Docker provider uses the Docker API, it is immediatel compatible not +only with single server Docker but Swarm and any additional Docker-compatible +API hosts. + +Use the navigation to the left to read about the available resources. + +
+Note: The Docker provider is new as of Terraform 0.4. +It is ready to be used but many features are still being added. If there +is a Docker feature missing, please report it in the GitHub repo. +
+ +## Example Usage + +``` +# Configure the Docker provider +provider "docker" { + host = "tcp://127.0.0.1:1234/" +} + +# Create a container +resource "docker_container" "foo" { + image = "${docker_image.ubuntu.latest}" + name = "foo" +} + +resource "docker_image" "ubuntu" { + name = "ubuntu:latest" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `host` - (Required) This is the address to the Docker host. If this is + blank, the `DOCKER_HOST` environment variable will also be read. + +* `cert_path` - (Optional) Path to a directory with certificate information + for connecting to the Docker host via TLS. If this is blank, the + `DOCKER_CERT_PATH` will also be checked. diff --git a/website/source/docs/providers/docker/r/container.html.markdown b/website/source/docs/providers/docker/r/container.html.markdown new file mode 100644 index 000000000..418e35fc1 --- /dev/null +++ b/website/source/docs/providers/docker/r/container.html.markdown @@ -0,0 +1,77 @@ +--- +layout: "docker" +page_title: "Docker: docker_container" +sidebar_current: "docs-docker-resource-container" +description: |- + Manages the lifecycle of a Docker container. +--- + +# docker\_container + +Manages the lifecycle of a Docker container. + +## Example Usage + +``` +# Start a container +resource "docker_container" "ubuntu" { + name = "foo" + image = "${docker_image.ubuntu.latest}" +} + +# Find the latest Ubuntu precise image. +resource "docker_image" "ubuntu" { + image = "ubuntu:precise" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required, string) The name of the Docker container. +* `image` - (Required, string) The ID of the image to back this container. + The easiest way to get this value is to use the `docker_image` resource + as is shown in the example above. + +* `command` - (Optional, list of strings) The command to use to start the + container. +* `dns` - (Optional, set of strings) Set of DNS servers. 
+* `env` - (Optional, set of strings) Environmental variables to set. +* `hostname` - (Optional, string) Hostname of the container. +* `domainname` - (Optional, string) Domain name of the container. +* `must_run` - (Optional, bool) If true, then the Docker container will be + kept running. If false, then as long as the container exists, Terraform + assumes it is successful. +* `ports` - (Optional) See [Ports](#ports) below for details. +* `publish_all_ports` - (Optional, bool) Publish all ports of the container. +* `volumes` - (Optional) See [Volumes](#volumes) below for details. + + +## Ports + +`ports` is a block within the configuration that can be repeated to specify +the port mappings of the container. Each `ports` block supports +the following: + +* `internal` - (Required, int) Port within the container. +* `external` - (Required, int) Port exposed out of the container. +* `ip` - (Optional, string) IP address/mask that can access this port. +* `protocol` - (Optional, string) Protocol that can be used over this port, + defaults to TCP. + + +## Volumes + +`volumes` is a block within the configuration that can be repeated to specify +the volumes attached to a container. Each `volumes` block supports +the following: + +* `from_container` - (Optional, string) The container where the volume is + coming from. +* `container_path` - (Optional, string) The path in the container where the + volume will be mounted. +* `host_path` - (Optional, string) The path on the host where the volume + is coming from. +* `read_only` - (Optinal, bool) If true, this volume will be readonly. + Defaults to false. 
diff --git a/website/source/docs/providers/docker/r/image.html.markdown b/website/source/docs/providers/docker/r/image.html.markdown new file mode 100644 index 000000000..a2c896110 --- /dev/null +++ b/website/source/docs/providers/docker/r/image.html.markdown @@ -0,0 +1,41 @@ +--- +layout: "docker" +page_title: "Docker: docker_image" +sidebar_current: "docs-docker-resource-image" +description: |- + Downloads and exports the ID of a Docker image. +--- + +# docker\_image + +Downloads and exports the ID of a Docker image. This can be used alongside +[docker\_container](/docs/providers/docker/r/container.html) +to programmatically get the latest image IDs without having to hardcode +them. + +## Example Usage + +``` +# Find the latest Ubuntu precise image. +resource "docker_image" "ubuntu" { + image = "ubuntu:precise" +} + +# Access it somewhere else with ${docker_image.ubuntu.latest} +``` + +## Argument Reference + +The following arguments are supported: + +* `name` - (Required) The name of the Docker image, including any tags. +* `keep_updated` - (Optional) If true, then the Docker image will always + be updated on the host to the latest. If this is false, as long as an + image is downloaded with the correct tag, it won't be redownloaded if + there is a newer image. + +## Attributes Reference + +The following attributes are exported in addition to the above configuration: + +* `latest` (string) - The ID of the image. diff --git a/website/source/docs/providers/heroku/index.html.markdown b/website/source/docs/providers/heroku/index.html.markdown index b04fd001c..696a41963 100644 --- a/website/source/docs/providers/heroku/index.html.markdown +++ b/website/source/docs/providers/heroku/index.html.markdown @@ -33,6 +33,8 @@ resource "heroku_app" "default" { The following arguments are supported: -* `api_key` - (Required) Heroku API token -* `email` - (Required) Email to be notified by Heroku +* `api_key` - (Required) Heroku API token. 
It must be provided, but it can also + be sourced from the `HEROKU_API_KEY` environment variable. +* `email` - (Required) Email to be notified by Heroku. It must be provided, but + it can also be sourced from the `HEROKU_EMAIL` environment variable. diff --git a/website/source/docs/providers/heroku/r/addon.html.markdown b/website/source/docs/providers/heroku/r/addon.html.markdown index d39cb1e8b..f9907597a 100644 --- a/website/source/docs/providers/heroku/r/addon.html.markdown +++ b/website/source/docs/providers/heroku/r/addon.html.markdown @@ -19,6 +19,12 @@ resource "heroku_app" "default" { name = "test-app" } +# Create a database, and configure the app to use it +resource "heroku_addon" "database" { + app = "${heroku_app.default.name}" + plan = "heroku-postgresql:hobby-basic" +} + # Add a web-hook addon for the app resource "heroku_addon" "webhook" { app = "${heroku_app.default.name}" diff --git a/website/source/docs/providers/heroku/r/app.html.markdown b/website/source/docs/providers/heroku/r/app.html.markdown index d05bd2fb0..9e51d62f2 100644 --- a/website/source/docs/providers/heroku/r/app.html.markdown +++ b/website/source/docs/providers/heroku/r/app.html.markdown @@ -17,6 +17,7 @@ create and manage applications on Heroku. # Create a new Heroku app resource "heroku_app" "default" { name = "my-cool-app" + region = "us" config_vars { FOOBAR = "baz" diff --git a/website/source/docs/providers/index.html.markdown b/website/source/docs/providers/index.html.markdown index f03c17c54..5365d0e86 100644 --- a/website/source/docs/providers/index.html.markdown +++ b/website/source/docs/providers/index.html.markdown @@ -14,7 +14,7 @@ etc. Almost any infrastructure noun can be represented as a resource in Terrafor Terraform is agnostic to the underlying platforms by supporting providers. A provider is responsible for understanding API interactions and exposing resources. Providers -generally are an IaaS (e.g. AWS, DigitalOcean, GCE), PaaS (e.g. 
Heroku, CloudFoundry), +generally are an IaaS (e.g. AWS, DigitalOcean, GCE, OpenStack), PaaS (e.g. Heroku, CloudFoundry), or SaaS services (e.g. Atlas, DNSimple, CloudFlare). Use the navigation to the left to read about the available providers. diff --git a/website/source/docs/providers/openstack/index.html.markdown b/website/source/docs/providers/openstack/index.html.markdown new file mode 100644 index 000000000..da6d1fb79 --- /dev/null +++ b/website/source/docs/providers/openstack/index.html.markdown @@ -0,0 +1,54 @@ +--- +layout: "openstack" +page_title: "Provider: OpenStack" +sidebar_current: "docs-openstack-index" +description: |- + The OpenStack provider is used to interact with the many resources supported by OpenStack. The provider needs to be configured with the proper credentials before it can be used. +--- + +# OpenStack Provider + +The OpenStack provider is used to interact with the +many resources supported by OpenStack. The provider needs to be configured +with the proper credentials before it can be used. + +Use the navigation to the left to read about the available resources. + +## Example Usage + +``` +# Configure the OpenStack Provider +provider "openstack" { + user_name = "admin" + tenant_name = "admin" + password = "pwd" + auth_url = "http://myauthurl:5000/v2.0" +} + +# Create a web server +resource "openstack_compute_instance_v2" "test-server" { + ... 
+} +``` + +## Configuration Reference + +The following arguments are supported: + +* `auth_url` - (Required) + +* `user_name` - (Optional; Required for Identity V2) + +* `user_id` - (Optional) + +* `password` - (Optional; Required if not using `api_key`) + +* `api_key` - (Optional; Required if not using `password`) + +* `domain_id` - (Optional) + +* `domain_name` - (Optional) + +* `tenant_id` - (Optional) + +* `tenant_name` - (Optional) diff --git a/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown new file mode 100644 index 000000000..0b91eb11c --- /dev/null +++ b/website/source/docs/providers/openstack/r/blockstorage_volume_v1.html.markdown @@ -0,0 +1,68 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_blockstorage_volume_v1" +sidebar_current: "docs-openstack-resource-blockstorage-volume-v1" +description: |- +Manages a V1 volume resource within OpenStack. +--- + +# openstack\_blockstorage\_volume_v1 + +Manages a V1 volume resource within OpenStack. + +## Example Usage + +``` +resource "openstack_blockstorage_volume_v1" "volume_1" { + region = "RegionOne" + name = "tf-test-volume" + description = "first test volume" + size = 3 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to create the volume. If + omitted, the `OS_REGION_NAME` environment variable is used. Changing this + creates a new volume. + +* `size` - (Required) The size of the volume to create (in gigabytes). Changing + this creates a new volume. + +* `name` - (Optional) A unique name for the volume. Changing this updates the + volume's name. + +* `description` - (Optional) A description of the volume. Changing this updates + the volume's description. + +* `image_id` - (Optional) The image ID from which to create the volume. + Changing this creates a new volume. 
+ +* `snapshot_id` - (Optional) The snapshot ID from which to create the volume. + Changing this creates a new volume. + +* `source_vol_id` - (Optional) The volume ID from which to create the volume. + Changing this creates a new volume. + +* `metadata` - (Optional) Metadata key/value pairs to associate with the volume. + Changing this updates the existing volume metadata. + +* `volume_type` - (Optional) The type of volume to create (either SATA or SSD). + Changing this creates a new volume. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `size` - See Argument Reference above. +* `name` - See Argument Reference above. +* `description` - See Argument Reference above. +* `image_id` - See Argument Reference above. +* `source_vol_id` - See Argument Reference above. +* `snapshot_id` - See Argument Reference above. +* `metadata` - See Argument Reference above. +* `volume_type` - See Argument Reference above. diff --git a/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown new file mode 100644 index 000000000..36805ed0d --- /dev/null +++ b/website/source/docs/providers/openstack/r/compute_instance_v2.html.markdown @@ -0,0 +1,120 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_compute_instance_v2" +sidebar_current: "docs-openstack-resource-compute-instance-v2" +description: |- + Manages a V2 VM instance resource within OpenStack. +--- + +# openstack\_compute\_instance_v2 + +Manages a V2 VM instance resource within OpenStack. 
+ +## Example Usage + +``` +resource "openstack_compute_instance_v2" "test-server" { + name = "tf-test" + image_id = "ad091b52-742f-469e-8f3c-fd81cadf0743" + flavor_id = "3" + metadata { + this = "that" + } + key_pair = "my_key_pair_name" + security_groups = ["test-group-1"] +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to create the server instance. If + omitted, the `OS_REGION_NAME` environment variable is used. Changing this + creates a new server. + +* `name` - (Required) A unique name for the resource. + +* `image_id` - (Optional; Required if `image_name` is empty) The image ID of + the desired image for the server. Changing this creates a new server. + +* `image_name` - (Optional; Required if `image_id` is empty) The name of the + desired image for the server. Changing this creates a new server. + +* `flavor_id` - (Optional; Required if `flavor_name` is empty) The flavor ID of + the desired flavor for the server. Changing this resizes the existing server. + +* `flavor_name` - (Optional; Required if `flavor_id` is empty) The name of the + desired flavor for the server. Changing this resizes the existing server. + +* `security_groups` - (Optional) An array of one or more security group names + to associate with the server. Changing this results in adding/removing + security groups from the existing server. + +* `availability_zone` - (Optional) The availability zone in which to create + the server. Changing this creates a new server. + +* `network` - (Optional) An array of one or more networks to attach to the + instance. The network object structure is documented below. Changing this + creates a new server. + +* `metadata` - (Optional) Metadata key/value pairs to make available from + within the instance. Changing this updates the existing server metadata. + +* `admin_pass` - (Optional) The administrative password to assign to the server. 
+ Changing this changes the root password on the existing server. + +* `key_pair` - (Optional) The name of a key pair to put on the server. The key + pair must already be created and associated with the tenant's account. + Changing this creates a new server. + +* `block_device` - (Optional) The object for booting by volume. The block_device + object structure is documented below. Changing this creates a new server. + +* `volume` - (Optional) Attach an existing volume to the instance. The volume + structure is described below. + +The `network` block supports: + +* `uuid` - (Required unless `port` is provided) The network UUID to attach to + the server. + +* `port` - (Required unless `uuid` is provided) The port UUID of a network to + attach to the server. + +* `fixed_ip` - (Optional) Specifies a fixed IP address to be used on this + network. + +The `block_device` block supports: + +* `uuid` - (Required) The UUID of the image, volume, or snapshot. + +* `source_type` - (Required) The source type of the device. Must be one of + "image", "volume", or "snapshot". + +* `volume_size` - (Optional) The size of the volume to create (in gigabytes). + +* `boot_index` - (Optional) The boot index of the volume. It defaults to 0. + +* `destination_type` - (Optional) The type that gets created. Possible values + are "volume" and "local". + +The `volume` block supports: + +* `volume_id` - (Required) The UUID of the volume to attach. + +* `device` - (Optional) The device that the volume will be attached as. For + example: `/dev/vdc`. Omit this option to allow the volume to be + auto-assigned a device. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `access_ip_v4` - See Argument Reference above. +* `access_ip_v6` - See Argument Reference above. +* `metadata` - See Argument Reference above. +* `security_groups` - See Argument Reference above. 
+* `flavor_ref` - See Argument Reference above. diff --git a/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown new file mode 100644 index 000000000..0c3beae27 --- /dev/null +++ b/website/source/docs/providers/openstack/r/compute_keypair_v2.html.markdown @@ -0,0 +1,43 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_compute_keypair_v2" +sidebar_current: "docs-openstack-resource-compute-keypair-v2" +description: |- + Manages a V2 keypair resource within OpenStack. +--- + +# openstack\_compute\_keypair_v2 + +Manages a V2 keypair resource within OpenStack. + +## Example Usage + +``` +resource "openstack_compute_keypair_v2" "test-keypair" { + name = "my-keypair" + public_key = "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDAjpC1hwiOCCmKEWxJ4qzTTsJbKzndLotBCz5PcwtUnflmU+gHJtWMZKpuEGVi29h0A/+ydKek1O18k10Ff+4tyFjiHDQAnOfgWf7+b1yK+qDip3X1C0UPMbwHlTfSGWLGZqd9LvEFx9k3h/M+VtMvwR1lJ9LUyTAImnNjWG7TaIPmui30HvM2UiFEmqkr4ijq45MyX2+fLIePLRIF61p4whjHAQYufqyno3BS48icQb4p6iVEZPo4AE2o9oIyQvj2mx4dk5Y8CgSETOZTYDOR3rU2fZTRDRgPJDH9FWvQjF5tA0p3d9CoWWd2s6GKKbfoUIi8R/Db1BSPJwkqB" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Compute client. + Keypairs are associated with accounts, but a Compute client is needed to + create one. If omitted, the `OS_REGION_NAME` environment variable is used. + Changing this creates a new keypair. + +* `name` - (Required) A unique name for the keypair. Changing this creates a new + keypair. + +* `public_key` - (Required) A pregenerated OpenSSH-formatted public key. + Changing this creates a new keypair. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `public_key` - See Argument Reference above. 
diff --git a/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown new file mode 100644 index 000000000..5b9538793 --- /dev/null +++ b/website/source/docs/providers/openstack/r/compute_secgroup_v2.html.markdown @@ -0,0 +1,76 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_compute_secgroup_v2" +sidebar_current: "docs-openstack-resource-compute-secgroup-2" +description: |- + Manages a V2 security group resource within OpenStack. +--- + +# openstack\_compute\_secgroup_v2 + +Manages a V2 security group resource within OpenStack. + +## Example Usage + +``` +resource "openstack_compute_secgroup_v2" "secgroup_1" { + name = "my_secgroup" + description = "my security group" + rule { + from_port = 22 + to_port = 22 + ip_protocol = "tcp" + cidr = "0.0.0.0/0" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Compute client. + A Compute client is needed to create a security group. If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + security group. + +* `name` - (Required) A unique name for the security group. Changing this + updates the `name` of an existing security group. + +* `description` - (Required) A description for the security group. Changing this + updates the `description` of an existing security group. + +* `rule` - (Optional) A rule describing how the security group operates. The + rule object structure is documented below. Changing this updates the + security group rules. + +The `rule` block supports: + +* `from_port` - (Required) An integer representing the lower bound of the port +range to open. Changing this creates a new security group rule. + +* `to_port` - (Required) An integer representing the upper bound of the port +range to open. Changing this creates a new security group rule. 
+ +* `ip_protocol` - (Required) The protocol type that will be allowed. Changing +this creates a new security group rule. + +* `cidr` - (Optional) Required if `from_group_id` is empty. The IP range that +will be the source of network traffic to the security group. Use 0.0.0.0/0 +to allow all IP addresses. Changing this creates a new security group rule. + +* `from_group_id` - (Optional) Required if `cidr` is empty. The ID of a group +from which to forward traffic to the parent group. Changing +this creates a new security group rule. + +* `self` - (Optional) Required if `cidr` and `from_group_id` are empty. If true, +the security group itself will be added as a source to this ingress rule. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `description` - See Argument Reference above. +* `rule` - See Argument Reference above. diff --git a/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown new file mode 100644 index 000000000..cbf6b2b87 --- /dev/null +++ b/website/source/docs/providers/openstack/r/lb_monitor_v1.html.markdown @@ -0,0 +1,82 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_lb_monitor_v1" +sidebar_current: "docs-openstack-resource-lb-monitor-v1" +description: |- + Manages a V1 load balancer monitor resource within OpenStack. +--- + +# openstack\_lb\_monitor_v1 + +Manages a V1 load balancer monitor resource within OpenStack. + +## Example Usage + +``` +resource "openstack_lb_monitor_v1" "monitor_1" { + type = "PING" + delay = 30 + timeout = 5 + max_retries = 3 + admin_state_up = "true" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Networking client. + A Networking client is needed to create an LB monitor. 
If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + LB monitor. + +* `type` - (Required) The type of probe, which is PING, TCP, HTTP, or HTTPS, + that is sent by the monitor to verify the member state. Changing this + creates a new monitor. + +* `delay` - (Required) The time, in seconds, between sending probes to members. + Changing this creates a new monitor. + +* `timeout` - (Required) Maximum number of seconds for a monitor to wait for a + ping reply before it times out. The value must be less than the delay value. + Changing this updates the timeout of the existing monitor. + +* `max_retries` - (Required) Number of permissible ping failures before changing + the member's status to INACTIVE. Must be a number between 1 and 10. Changing + this updates the max_retries of the existing monitor. + +* `url_path` - (Optional) Required for HTTP(S) types. URI path that will be + accessed if monitor type is HTTP or HTTPS. Changing this updates the + url_path of the existing monitor. + +* `http_method` - (Optional) Required for HTTP(S) types. The HTTP method used + for requests by the monitor. If this attribute is not specified, it defaults + to "GET". Changing this updates the http_method of the existing monitor. + +* `expected_codes` - (Optional) Required for HTTP(S) types. Expected HTTP codes + for a passing HTTP(S) monitor. You can either specify a single status like + "200", or a range like "200-202". Changing this updates the expected_codes + of the existing monitor. + +* `admin_state_up` - (Optional) The administrative state of the monitor. + Acceptable values are "true" and "false". Changing this value updates the + state of the existing monitor. + +* `tenant_id` - (Optional) The owner of the monitor. Required if admin wants to + create a monitor for another tenant. Changing this creates a new monitor. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. 
+* `type` - See Argument Reference above. +* `delay` - See Argument Reference above. +* `timeout` - See Argument Reference above. +* `max_retries` - See Argument Reference above. +* `url_path` - See Argument Reference above. +* `http_method` - See Argument Reference above. +* `expected_codes` - See Argument Reference above. +* `admin_state_up` - See Argument Reference above. +* `tenant_id` - See Argument Reference above. diff --git a/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown new file mode 100644 index 000000000..5ddbdf1af --- /dev/null +++ b/website/source/docs/providers/openstack/r/lb_pool_v1.html.markdown @@ -0,0 +1,90 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_lb_pool_v1" +sidebar_current: "docs-openstack-resource-lb-pool-v1" +description: |- + Manages a V1 load balancer pool resource within OpenStack. +--- + +# openstack\_lb\_pool_v1 + +Manages a V1 load balancer pool resource within OpenStack. + +## Example Usage + +``` +resource "openstack_lb_pool_v1" "pool_1" { + name = "tf_test_lb_pool" + protocol = "HTTP" + subnet_id = "12345" + lb_method = "ROUND_ROBIN" + monitor_ids = ["67890"] + member { + address = "192.168.0.1" + port = 80 + admin_state_up = "true" + } +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Networking client. + A Networking client is needed to create an LB pool. If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + LB pool. + +* `name` - (Required) The name of the pool. Changing this updates the name of + the existing pool. + +* `protocol` - (Required) The protocol used by the pool members; you can use + either 'TCP', 'HTTP', or 'HTTPS'. Changing this creates a new pool. + +* `subnet_id` - (Required) The network on which the members of the pool will be + located. 
Only members that are on this network can be added to the pool. + Changing this creates a new pool. + +* `lb_method` - (Required) The algorithm used to distribute load between the + members of the pool. The current specification supports 'ROUND_ROBIN' and + 'LEAST_CONNECTIONS' as valid values for this attribute. + +* `tenant_id` - (Optional) The owner of the pool. Required if admin wants to + create a pool member for another tenant. Changing this creates a new pool. + +* `monitor_ids` - (Optional) A list of IDs of monitors to associate with the + pool. + +* `member` - (Optional) An existing node to add to the pool. Changing this + updates the members of the pool. The member object structure is documented + below. + +The `member` block supports: + +* `address` - (Required) The IP address of the member. Changing this creates a +new member. + +* `port` - (Required) An integer representing the port on which the member is +hosted. Changing this creates a new member. + +* `admin_state_up` - (Optional) The administrative state of the member. +Acceptable values are 'true' and 'false'. Changing this value updates the +state of the existing member. + +* `tenant_id` - (Optional) The owner of the member. Required if admin wants to +create a pool member for another tenant. Changing this creates a new member. + + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `protocol` - See Argument Reference above. +* `subnet_id` - See Argument Reference above. +* `lb_method` - See Argument Reference above. +* `tenant_id` - See Argument Reference above. +* `monitor_ids` - See Argument Reference above. +* `member` - See Argument Reference above. 
diff --git a/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown new file mode 100644 index 000000000..7a9bc3d4b --- /dev/null +++ b/website/source/docs/providers/openstack/r/lb_vip_v1.html.markdown @@ -0,0 +1,95 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_lb_vip_v1" +sidebar_current: "docs-openstack-resource-lb-vip-v1" +description: |- + Manages a V1 load balancer vip resource within OpenStack. +--- + +# openstack\_lb\_vip_v1 + +Manages a V1 load balancer vip resource within OpenStack. + +## Example Usage + +``` +resource "openstack_lb_vip_v1" "vip_1" { + name = "tf_test_lb_vip" + subnet_id = "12345" + protocol = "HTTP" + port = 80 + pool_id = "67890" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Networking client. + A Networking client is needed to create a VIP. If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + VIP. + +* `name` - (Required) The name of the vip. Changing this updates the name of + the existing vip. + +* `subnet_id` - (Required) The network on which to allocate the vip's address. A + tenant can only create vips on networks authorized by policy (e.g. networks + that belong to them or networks that are shared). Changing this creates a + new vip. + +* `protocol` - (Required) The protocol - can be either 'TCP', 'HTTP', or + 'HTTPS'. Changing this creates a new vip. + +* `port` - (Required) The port on which to listen for client traffic. Changing + this creates a new vip. + +* `pool_id` - (Required) The ID of the pool with which the vip is associated. + Changing this updates the pool_id of the existing vip. + +* `tenant_id` - (Optional) The owner of the vip. Required if admin wants to + create a vip member for another tenant. Changing this creates a new vip. + +* `address` - (Optional) The IP address of the vip. 
Changing this creates a new + vip. + +* `description` - (Optional) Human-readable description for the vip. Changing + this updates the description of the existing vip. + +* `persistence` - (Optional) Omit this field to prevent session persistence. + The persistence object structure is documented below. Changing this updates + the persistence of the existing vip. + +* `conn_limit` - (Optional) The maximum number of connections allowed for the + vip. Default is -1, meaning no limit. Changing this updates the conn_limit + of the existing vip. + +* `admin_state_up` - (Optional) The administrative state of the vip. + Acceptable values are "true" and "false". Changing this value updates the + state of the existing vip. + +The `persistence` block supports: + +* `type` - (Required) The type of persistence mode. Valid values are "SOURCE_IP", + "HTTP_COOKIE", or "APP_COOKIE". + +* `cookie_name` - (Optional) The name of the cookie if persistence mode is set + appropriately. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `subnet_id` - See Argument Reference above. +* `protocol` - See Argument Reference above. +* `port` - See Argument Reference above. +* `pool_id` - See Argument Reference above. +* `tenant_id` - See Argument Reference above. +* `address` - See Argument Reference above. +* `description` - See Argument Reference above. +* `persistence` - See Argument Reference above. +* `conn_limit` - See Argument Reference above. +* `admin_state_up` - See Argument Reference above. 
diff --git a/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown new file mode 100644 index 000000000..699cd1ae9 --- /dev/null +++ b/website/source/docs/providers/openstack/r/networking_network_v2.html.markdown @@ -0,0 +1,53 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_networking_network_v2" +sidebar_current: "docs-openstack-resource-networking-network-v2" +description: |- + Manages a V2 Neutron network resource within OpenStack. +--- + +# openstack\_networking\_network_v2 + +Manages a V2 Neutron network resource within OpenStack. + +## Example Usage + +``` +resource "openstack_networking_network_v2" "network_1" { + name = "tf_test_network" + admin_state_up = "true" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Networking client. + A Networking client is needed to create a Neutron network. If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + network. + +* `name` - (Optional) The name of the network. Changing this updates the name of + the existing network. + +* `shared` - (Optional) Specifies whether the network resource can be accessed + by any tenant or not. Changing this updates the sharing capabilities of the + existing network. + +* `tenant_id` - (Optional) The owner of the network. Required if admin wants to + create a network for another tenant. Changing this creates a new network. + +* `admin_state_up` - (Optional) The administrative state of the network. + Acceptable values are "true" and "false". Changing this value updates the + state of the existing network. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `shared` - See Argument Reference above. 
+* `tenant_id` - See Argument Reference above. +* `admin_state_up` - See Argument Reference above. diff --git a/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown new file mode 100644 index 000000000..a8243a817 --- /dev/null +++ b/website/source/docs/providers/openstack/r/networking_subnet_v2.html.markdown @@ -0,0 +1,98 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_networking_subnet_v2" +sidebar_current: "docs-openstack-resource-networking-subnet-v2" +description: |- + Manages a V2 Neutron subnet resource within OpenStack. +--- + +# openstack\_networking\_subnet_v2 + +Manages a V2 Neutron subnet resource within OpenStack. + +## Example Usage + +``` +resource "openstack_networking_network_v2" "network_1" { + name = "tf_test_network" + admin_state_up = "true" +} + +resource "openstack_networking_subnet_v2" "subnet_1" { + network_id = "${openstack_networking_network_v2.network_1.id}" + cidr = "192.168.199.0/24" + ip_version = 4 +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to obtain the V2 Networking client. + A Networking client is needed to create a Neutron subnet. If omitted, the + `OS_REGION_NAME` environment variable is used. Changing this creates a new + subnet. + +* `network_id` - (Required) The UUID of the parent network. Changing this + creates a new subnet. + +* `cidr` - (Required) CIDR representing IP range for this subnet, based on IP + version. Changing this creates a new subnet. + +* `ip_version` - (Required) IP version, either 4 or 6. Changing this creates a + new subnet. + +* `name` - (Optional) The name of the subnet. Changing this updates the name of + the existing subnet. + +* `tenant_id` - (Optional) The owner of the subnet. Required if admin wants to + create a subnet for another tenant. Changing this creates a new subnet. 
+ +* `allocation_pools` - (Optional) An array of sub-ranges of CIDR available for + dynamic allocation to ports. The allocation_pool object structure is + documented below. Changing this creates a new subnet. + +* `gateway_ip` - (Optional) Default gateway used by devices in this subnet. + Changing this updates the gateway IP of the existing subnet. + +* `enable_dhcp` - (Optional) The administrative state of the network. + Acceptable values are "true" and "false". Changing this value enables or + disables the DHCP capabilities of the existing subnet. + +* `dns_nameservers` - (Optional) An array of DNS name server names used by hosts + in this subnet. Changing this updates the DNS name servers for the existing + subnet. + +* `host_routes` - (Optional) An array of routes that should be used by devices + with IPs from this subnet (not including local subnet route). The host_route + object structure is documented below. Changing this updates the host routes + for the existing subnet. + +The `allocation_pools` block supports: + +* `start` - (Required) The starting address. + +* `end` - (Required) The ending address. + +The `host_routes` block supports: + +* `destination_cidr` - (Required) The destination CIDR. + +* `next_hop` - (Required) The next hop in the route. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `network_id` - See Argument Reference above. +* `cidr` - See Argument Reference above. +* `ip_version` - See Argument Reference above. +* `name` - See Argument Reference above. +* `tenant_id` - See Argument Reference above. +* `allocation_pools` - See Argument Reference above. +* `gateway_ip` - See Argument Reference above. +* `enable_dhcp` - See Argument Reference above. +* `dns_nameservers` - See Argument Reference above. +* `host_routes` - See Argument Reference above. 
diff --git a/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown b/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown new file mode 100644 index 000000000..8101d1ca2 --- /dev/null +++ b/website/source/docs/providers/openstack/r/objectstorage_container_v1.html.markdown @@ -0,0 +1,68 @@ +--- +layout: "openstack" +page_title: "OpenStack: openstack_objectstorage_container_v1" +sidebar_current: "docs-openstack-resource-objectstorage-container-v1" +description: |- +Manages a V1 container resource within OpenStack. +--- + +# openstack\_objectstorage\_container_v1 + +Manages a V1 container resource within OpenStack. + +## Example Usage + +``` +resource "openstack_objectstorage_container_v1" "container_1" { + region = "RegionOne" + name = "tf-test-container-1" + metadata { + test = "true" + } + content_type = "application/json" +} +``` + +## Argument Reference + +The following arguments are supported: + +* `region` - (Required) The region in which to create the container. If + omitted, the `OS_REGION_NAME` environment variable is used. Changing this + creates a new container. + +* `name` - (Required) A unique name for the container. Changing this creates a + new container. + +* `container_read` - (Optional) Sets an access control list (ACL) that grants + read access. This header can contain a comma-delimited list of users that + can read the container (allows the GET method for all objects in the + container). Changing this updates the access control list read access. + +* `container_sync_to` - (Optional) The destination for container synchronization. + Changing this updates container synchronization. + +* `container_sync_key` - (Optional) The secret key for container synchronization. + Changing this updates container synchronization. + +* `container_write` - (Optional) Sets an ACL that grants write access. + Changing this updates the access control list write access. 
+ +* `metadata` - (Optional) Custom key/value pairs to associate with the container. + Changing this updates the existing container metadata. + +* `content_type` - (Optional) The MIME type for the container. Changing this + updates the MIME type. + +## Attributes Reference + +The following attributes are exported: + +* `region` - See Argument Reference above. +* `name` - See Argument Reference above. +* `container_read` - See Argument Reference above. +* `container_sync_to` - See Argument Reference above. +* `container_sync_key` - See Argument Reference above. +* `container_write` - See Argument Reference above. +* `metadata` - See Argument Reference above. +* `content_type` - See Argument Reference above. diff --git a/website/source/layouts/docker.erb b/website/source/layouts/docker.erb new file mode 100644 index 000000000..920e7aa43 --- /dev/null +++ b/website/source/layouts/docker.erb @@ -0,0 +1,30 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> +<% end %> diff --git a/website/source/layouts/docs.erb b/website/source/layouts/docs.erb index a0b31127a..30b8c3253 100644 --- a/website/source/layouts/docs.erb +++ b/website/source/layouts/docs.erb @@ -130,7 +130,7 @@ > DigitalOcean - + > DNSMadeEasy @@ -140,6 +140,10 @@ DNSimple + > + Docker + + > Google Cloud @@ -215,6 +219,10 @@ > Resource Lifecycle + + > + Resource Addressing + diff --git a/website/source/layouts/openstack.erb b/website/source/layouts/openstack.erb new file mode 100644 index 000000000..22afb4aeb --- /dev/null +++ b/website/source/layouts/openstack.erb @@ -0,0 +1,53 @@ +<% wrap_layout :inner do %> + <% content_for :sidebar do %> + + <% end %> + + <%= yield %> + <% end %>