Merge pull request #1921 from jtopjian/openstack-servergroup-schedulerhints
provider/openstack: scheduler_hints and servergroups
This commit is contained in:
commit
ede6af8763
|
@ -73,6 +73,7 @@ func Provider() terraform.ResourceProvider {
|
|||
"openstack_compute_instance_v2": resourceComputeInstanceV2(),
|
||||
"openstack_compute_keypair_v2": resourceComputeKeypairV2(),
|
||||
"openstack_compute_secgroup_v2": resourceComputeSecGroupV2(),
|
||||
"openstack_compute_servergroup_v2": resourceComputeServerGroupV2(),
|
||||
"openstack_compute_floatingip_v2": resourceComputeFloatingIPV2(),
|
||||
"openstack_fw_firewall_v1": resourceFWFirewallV1(),
|
||||
"openstack_fw_policy_v1": resourceFWPolicyV1(),
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/bootfromvolume"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/floatingip"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/keypairs"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/schedulerhints"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/secgroups"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/tenantnetworks"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
|
||||
|
@ -224,6 +225,48 @@ func resourceComputeInstanceV2() *schema.Resource {
|
|||
},
|
||||
Set: resourceComputeVolumeAttachmentHash,
|
||||
},
|
||||
"scheduler_hints": &schema.Schema{
|
||||
Type: schema.TypeSet,
|
||||
Optional: true,
|
||||
Elem: &schema.Resource{
|
||||
Schema: map[string]*schema.Schema{
|
||||
"group": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"different_host": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
"same_host": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
"query": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
"target_cell": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
"build_near_host_ip": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
Set: resourceComputeSchedulerHintsHash,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
@ -289,6 +332,16 @@ func resourceComputeInstanceV2Create(d *schema.ResourceData, meta interface{}) e
|
|||
}
|
||||
}
|
||||
|
||||
schedulerHintsRaw := d.Get("scheduler_hints").(*schema.Set).List()
|
||||
if len(schedulerHintsRaw) > 0 {
|
||||
log.Printf("[DEBUG] schedulerhints: %+v", schedulerHintsRaw)
|
||||
schedulerHints := resourceInstanceSchedulerHintsV2(d, schedulerHintsRaw[0].(map[string]interface{}))
|
||||
createOpts = &schedulerhints.CreateOptsExt{
|
||||
createOpts,
|
||||
schedulerHints,
|
||||
}
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Create Options: %#v", createOpts)
|
||||
server, err := servers.Create(computeClient, createOpts).Extract()
|
||||
if err != nil {
|
||||
|
@ -877,6 +930,40 @@ func resourceInstanceBlockDeviceV2(d *schema.ResourceData, bd map[string]interfa
|
|||
return bfvOpts
|
||||
}
|
||||
|
||||
func resourceInstanceSchedulerHintsV2(d *schema.ResourceData, schedulerHintsRaw map[string]interface{}) schedulerhints.SchedulerHints {
|
||||
differentHost := []string{}
|
||||
if len(schedulerHintsRaw["different_host"].([]interface{})) > 0 {
|
||||
for _, dh := range schedulerHintsRaw["different_host"].([]interface{}) {
|
||||
differentHost = append(differentHost, dh.(string))
|
||||
}
|
||||
}
|
||||
|
||||
sameHost := []string{}
|
||||
if len(schedulerHintsRaw["same_host"].([]interface{})) > 0 {
|
||||
for _, sh := range schedulerHintsRaw["same_host"].([]interface{}) {
|
||||
sameHost = append(sameHost, sh.(string))
|
||||
}
|
||||
}
|
||||
|
||||
query := make([]interface{}, len(schedulerHintsRaw["query"].([]interface{})))
|
||||
if len(schedulerHintsRaw["query"].([]interface{})) > 0 {
|
||||
for _, q := range schedulerHintsRaw["query"].([]interface{}) {
|
||||
query = append(query, q.(string))
|
||||
}
|
||||
}
|
||||
|
||||
schedulerHints := schedulerhints.SchedulerHints{
|
||||
Group: schedulerHintsRaw["group"].(string),
|
||||
DifferentHost: differentHost,
|
||||
SameHost: sameHost,
|
||||
Query: query,
|
||||
TargetCell: schedulerHintsRaw["target_cell"].(string),
|
||||
BuildNearHostIP: schedulerHintsRaw["build_near_host_ip"].(string),
|
||||
}
|
||||
|
||||
return schedulerHints
|
||||
}
|
||||
|
||||
func getImageID(client *gophercloud.ServiceClient, d *schema.ResourceData) (string, error) {
|
||||
imageId := d.Get("image_id").(string)
|
||||
|
||||
|
@ -962,6 +1049,29 @@ func resourceComputeVolumeAttachmentHash(v interface{}) int {
|
|||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
func resourceComputeSchedulerHintsHash(v interface{}) int {
|
||||
var buf bytes.Buffer
|
||||
m := v.(map[string]interface{})
|
||||
|
||||
if m["group"] != nil {
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["group"].(string)))
|
||||
}
|
||||
|
||||
if m["target_cell"] != nil {
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["target_cell"].(string)))
|
||||
}
|
||||
|
||||
if m["build_host_near_ip"] != nil {
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["build_host_near_ip"].(string)))
|
||||
}
|
||||
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["different_host"].([]interface{})))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["same_host"].([]interface{})))
|
||||
buf.WriteString(fmt.Sprintf("%s-", m["query"].([]interface{})))
|
||||
|
||||
return hashcode.String(buf.String())
|
||||
}
|
||||
|
||||
func attachVolumesToInstance(computeClient *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, serverId string, vols []interface{}) error {
|
||||
if len(vols) > 0 {
|
||||
for _, v := range vols {
|
||||
|
|
|
@ -0,0 +1,123 @@
|
|||
package openstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/schema"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups"
|
||||
)
|
||||
|
||||
func resourceComputeServerGroupV2() *schema.Resource {
|
||||
return &schema.Resource{
|
||||
Create: resourceComputeServerGroupV2Create,
|
||||
Read: resourceComputeServerGroupV2Read,
|
||||
Update: nil,
|
||||
Delete: resourceComputeServerGroupV2Delete,
|
||||
|
||||
Schema: map[string]*schema.Schema{
|
||||
"region": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
Required: true,
|
||||
ForceNew: true,
|
||||
DefaultFunc: envDefaultFuncAllowMissing("OS_REGION_NAME"),
|
||||
},
|
||||
"name": &schema.Schema{
|
||||
Type: schema.TypeString,
|
||||
ForceNew: true,
|
||||
Required: true,
|
||||
},
|
||||
"policies": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Optional: true,
|
||||
ForceNew: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
"members": &schema.Schema{
|
||||
Type: schema.TypeList,
|
||||
Computed: true,
|
||||
Elem: &schema.Schema{Type: schema.TypeString},
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func resourceComputeServerGroupV2Create(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
computeClient, err := config.computeV2Client(d.Get("region").(string))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating OpenStack compute client: %s", err)
|
||||
}
|
||||
|
||||
createOpts := &servergroups.CreateOpts{
|
||||
Name: d.Get("name").(string),
|
||||
Policies: resourceServerGroupPoliciesV2(d),
|
||||
}
|
||||
log.Printf("[DEBUG] Create Options: %#v", createOpts)
|
||||
newSG, err := servergroups.Create(computeClient, createOpts).Extract()
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating ServerGroup", err)
|
||||
}
|
||||
|
||||
d.SetId(newSG.ID)
|
||||
|
||||
return resourceComputeServerGroupV2Read(d, meta)
|
||||
}
|
||||
|
||||
func resourceComputeServerGroupV2Read(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
computeClient, err := config.computeV2Client(d.Get("region").(string))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating OpenStack compute client: %s", err)
|
||||
}
|
||||
|
||||
sg, err := servergroups.Get(computeClient, d.Id()).Extract()
|
||||
if err != nil {
|
||||
return CheckDeleted(d, err, "server group")
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Retrieved ServerGroup %s: %+v", d.Id(), sg)
|
||||
|
||||
// Set the name
|
||||
d.Set("name", sg.Name)
|
||||
|
||||
// Set the policies
|
||||
policies := []string{}
|
||||
for _, p := range sg.Policies {
|
||||
policies = append(policies, p)
|
||||
}
|
||||
d.Set("policies", policies)
|
||||
|
||||
// Set the members
|
||||
members := []string{}
|
||||
for _, m := range sg.Members {
|
||||
members = append(members, m)
|
||||
}
|
||||
d.Set("members", members)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceComputeServerGroupV2Delete(d *schema.ResourceData, meta interface{}) error {
|
||||
config := meta.(*Config)
|
||||
computeClient, err := config.computeV2Client(d.Get("region").(string))
|
||||
if err != nil {
|
||||
return fmt.Errorf("Error creating OpenStack compute client: %s", err)
|
||||
}
|
||||
|
||||
log.Printf("[DEBUG] Deleting ServerGroup %s", d.Id())
|
||||
if err := servergroups.Delete(computeClient, d.Id()).ExtractErr(); err != nil {
|
||||
return fmt.Errorf("Error deleting ServerGroup: %s", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func resourceServerGroupPoliciesV2(d *schema.ResourceData) []string {
|
||||
rawPolicies := d.Get("policies").([]interface{})
|
||||
policies := make([]string, len(rawPolicies))
|
||||
for i, raw := range rawPolicies {
|
||||
policies[i] = raw.(string)
|
||||
}
|
||||
return policies
|
||||
}
|
|
@ -0,0 +1,138 @@
|
|||
package openstack
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
"github.com/hashicorp/terraform/helper/resource"
|
||||
"github.com/hashicorp/terraform/terraform"
|
||||
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/extensions/servergroups"
|
||||
"github.com/rackspace/gophercloud/openstack/compute/v2/servers"
|
||||
)
|
||||
|
||||
// TestAccComputeV2ServerGroup_basic is an acceptance test: it creates a
// lone server group and verifies the group exists in the OpenStack API.
// Like all acceptance tests, it talks to a real cloud and only runs when
// the acceptance-test environment is configured (testAccPreCheck).
func TestAccComputeV2ServerGroup_basic(t *testing.T) {
	var serverGroup servergroups.ServerGroup

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeV2ServerGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeV2ServerGroup_basic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeV2ServerGroupExists(t, "openstack_compute_servergroup_v2.mysg", &serverGroup),
				),
			},
		},
	})
}
||||
|
||||
// TestAccComputeV2ServerGroup_affinity is an acceptance test: it creates a
// server group with the affinity policy plus an instance that references
// the group via scheduler_hints, then verifies the instance appears in the
// group's member list.
func TestAccComputeV2ServerGroup_affinity(t *testing.T) {
	var instance servers.Server
	var sg servergroups.ServerGroup

	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckComputeV2ServerGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccComputeV2ServerGroup_affinity,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckComputeV2ServerGroupExists(t, "openstack_compute_servergroup_v2.mysg", &sg),
					testAccCheckComputeV2InstanceExists(t, "openstack_compute_instance_v2.myinstance", &instance),
					testAccCheckComputeV2InstanceInServerGroup(&instance, &sg),
				),
			},
		},
	})
}
|
||||
|
||||
// testAccCheckComputeV2ServerGroupDestroy verifies that every server group
// tracked in Terraform state has actually been removed from the OpenStack
// API after the test tears down its resources.
func testAccCheckComputeV2ServerGroupDestroy(s *terraform.State) error {
	config := testAccProvider.Meta().(*Config)
	computeClient, err := config.computeV2Client(OS_REGION_NAME)
	if err != nil {
		return fmt.Errorf("(testAccCheckComputeV2ServerGroupDestroy) Error creating OpenStack compute client: %s", err)
	}

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "openstack_compute_servergroup_v2" {
			continue
		}

		// A successful Get means the group still exists, which is a
		// failure for a destroy check.
		_, err := servergroups.Get(computeClient, rs.Primary.ID).Extract()
		if err == nil {
			return fmt.Errorf("ServerGroup still exists")
		}
	}

	return nil
}
|
||||
|
||||
// testAccCheckComputeV2ServerGroupExists returns a check function that
// looks up the resource named n in state, fetches the corresponding server
// group from the OpenStack API, and copies it into kp so later checks can
// inspect it.
func testAccCheckComputeV2ServerGroupExists(t *testing.T, n string, kp *servergroups.ServerGroup) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}

		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}

		config := testAccProvider.Meta().(*Config)
		computeClient, err := config.computeV2Client(OS_REGION_NAME)
		if err != nil {
			return fmt.Errorf("(testAccCheckComputeV2ServerGroupExists) Error creating OpenStack compute client: %s", err)
		}

		found, err := servergroups.Get(computeClient, rs.Primary.ID).Extract()
		if err != nil {
			return err
		}

		if found.ID != rs.Primary.ID {
			return fmt.Errorf("ServerGroup not found")
		}

		// Export the fetched group through the out-parameter.
		*kp = *found

		return nil
	}
}
|
||||
|
||||
func testAccCheckComputeV2InstanceInServerGroup(instance *servers.Server, sg *servergroups.ServerGroup) resource.TestCheckFunc {
|
||||
return func(s *terraform.State) error {
|
||||
if len(sg.Members) > 0 {
|
||||
for _, m := range sg.Members {
|
||||
if m == instance.ID {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("Instance %s is not part of Server Group %s", instance.ID, sg.ID)
|
||||
}
|
||||
}
|
||||
|
||||
// testAccComputeV2ServerGroup_basic is the Terraform config for the basic
// acceptance test: a lone server group using the affinity policy.
var testAccComputeV2ServerGroup_basic = `
resource "openstack_compute_servergroup_v2" "mysg" {
  name = "mysg"
  policies = ["affinity"]
}`
|
||||
|
||||
// testAccComputeV2ServerGroup_affinity is the Terraform config for the
// affinity acceptance test: a server group plus an instance that joins it
// through the scheduler_hints "group" reference.
var testAccComputeV2ServerGroup_affinity = `
resource "openstack_compute_servergroup_v2" "mysg" {
  name = "mysg"
  policies = ["affinity"]
}

resource "openstack_compute_instance_v2" "myinstance" {
  name = "myinstance"
  security_groups = ["default"]
  scheduler_hints {
    group = "${openstack_compute_servergroup_v2.mysg.id}"
  }
}`
|
|
@ -83,6 +83,9 @@ The following arguments are supported:
|
|||
* `volume` - (Optional) Attach an existing volume to the instance. The volume
|
||||
structure is described below.
|
||||
|
||||
* `scheduler_hints` - (Optional) Provide the Nova scheduler with hints on how
  the instance should be launched. The available hints are described below.
|
||||
|
||||
The `network` block supports:
|
||||
|
||||
* `uuid` - (Required unless `port` or `name` is provided) The network UUID to
|
||||
|
@ -119,6 +122,25 @@ The `volume` block supports:
|
|||
example: `/dev/vdc`. Omit this option to allow the volume to be
|
||||
auto-assigned a device.
|
||||
|
||||
The `scheduler_hints` block supports:
|
||||
|
||||
* `group` - (Optional) A UUID of a Server Group. The instance will be placed
|
||||
into that group.
|
||||
|
||||
* `different_host` - (Optional) A list of instance UUIDs. The instance will
|
||||
be scheduled on a different host than all other instances.
|
||||
|
||||
* `same_host` - (Optional) A list of instance UUIDs. The instance will be
|
||||
scheduled on the same host of those specified.
|
||||
|
||||
* `query` - (Optional) A conditional query that a compute node must pass in
|
||||
order to host an instance.
|
||||
|
||||
* `target_cell` - (Optional) The name of a cell to host the instance.
|
||||
|
||||
* `build_near_host_ip` - (Optional) An IP Address in CIDR form. The instance
|
||||
will be placed on a compute node that is in the same subnet.
|
||||
|
||||
## Attributes Reference
|
||||
|
||||
The following attributes are exported:
|
||||
|
|
|
@ -0,0 +1,53 @@
|
|||
---
|
||||
layout: "openstack"
|
||||
page_title: "OpenStack: openstack_compute_servergroup_v2"
|
||||
sidebar_current: "docs-openstack-resource-compute-servergroup-v2"
|
||||
description: |-
|
||||
Manages a V2 Server Group resource within OpenStack.
|
||||
---
|
||||
|
||||
# openstack\_compute\_servergroup\_v2
|
||||
|
||||
Manages a V2 Server Group resource within OpenStack.
|
||||
|
||||
## Example Usage
|
||||
|
||||
```
|
||||
resource "openstack_compute_servergroup_v2" "test-sg" {
|
||||
name = "my-sg"
|
||||
policies = ["anti-affinity"]
|
||||
}
|
||||
```
|
||||
|
||||
## Argument Reference
|
||||
|
||||
The following arguments are supported:
|
||||
|
||||
* `region` - (Required) The region in which to obtain the V2 Compute client.
|
||||
If omitted, the `OS_REGION_NAME` environment variable is used. Changing
|
||||
this creates a new server group.
|
||||
|
||||
* `name` - (Required) A unique name for the server group. Changing this creates
|
||||
a new server group.
|
||||
|
||||
* `policies` - (Optional) The set of policies for the server group. Only two
  policies are available right now, and both are mutually exclusive. See
  the Policies section for more information. Changing this creates a new
  server group.
|
||||
|
||||
## Policies
|
||||
|
||||
* `affinity` - All instances/servers launched in this group will be hosted on
|
||||
the same compute node.
|
||||
|
||||
* `anti-affinity` - All instances/servers launched in this group will be
|
||||
hosted on different compute nodes.
|
||||
|
||||
## Attributes Reference
|
||||
|
||||
The following attributes are exported:
|
||||
|
||||
* `region` - See Argument Reference above.
|
||||
* `name` - See Argument Reference above.
|
||||
* `policies` - See Argument Reference above.
|
||||
* `members` - The instances that are part of this server group.
|
Loading…
Reference in New Issue