snapshot from CenturyLinkLabs/terraform-provider-clc
+examples +docs for clc
parent 7e3e3b20b1
commit 7775cc8ccc
@@ -0,0 +1,12 @@
package main

import (
	"github.com/hashicorp/terraform/builtin/providers/clc"
	"github.com/hashicorp/terraform/plugin"
)

func main() {
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: clc.Provider,
	})
}

@@ -0,0 +1 @@
package main

@@ -0,0 +1,210 @@
package clc

import (
	"fmt"
	"log"
	"strconv"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/api"
	"github.com/CenturyLinkCloud/clc-sdk/group"
	"github.com/CenturyLinkCloud/clc-sdk/server"
	"github.com/CenturyLinkCloud/clc-sdk/status"
	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

// Provider implements ResourceProvider for CLC
func Provider() terraform.ResourceProvider {
	return &schema.Provider{
		Schema: map[string]*schema.Schema{
			"username": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("CLC_USERNAME", nil),
				Description: "Your CLC username",
			},
			"password": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("CLC_PASSWORD", nil),
				Description: "Your CLC password",
			},
			"account": &schema.Schema{
				Type:        schema.TypeString,
				Required:    true,
				DefaultFunc: schema.EnvDefaultFunc("CLC_ACCOUNT", nil),
				Description: "Your CLC account alias",
			},
			"url": &schema.Schema{
				Type:        schema.TypeString,
				Optional:    true,
				Description: "Base CLC API url",
				DefaultFunc: schema.EnvDefaultFunc("CLC_BASE_URL", nil),
			},
		},

		ResourcesMap: map[string]*schema.Resource{
			"clc_server":             resourceCLCServer(),
			"clc_group":              resourceCLCGroup(),
			"clc_public_ip":          resourceCLCPublicIP(),
			"clc_load_balancer":      resourceCLCLoadBalancer(),
			"clc_load_balancer_pool": resourceCLCLoadBalancerPool(),
		},

		ConfigureFunc: providerConfigure,
	}
}

func providerConfigure(d *schema.ResourceData) (interface{}, error) {
	un := d.Get("username").(string)
	pw := d.Get("password").(string)
	ac := d.Get("account").(string)
	url := d.Get("url").(string)

	config, err := api.NewConfig(un, pw, ac, url)
	if err != nil {
		return nil, fmt.Errorf("Failed to create CLC config with provided details: %v", err)
	}
	config.UserAgent = "terraform-clc"

	client := clc.New(config)
	if err := client.Authenticate(); err != nil {
		return nil, fmt.Errorf("Failed authenticating with provided credentials: %v", err)
	}

	alerts, err := client.Alert.GetAll()
	if err != nil {
		return nil, fmt.Errorf("Failed to connect to the CLC api because %s", err)
	}
	for _, a := range alerts.Items {
		log.Printf("[WARN] Received alert: %v", a)
	}
	return client, nil
}

// package utility functions

func waitStatus(client *clc.Client, id string) error {
	// block until queue is processed and server is up
	poll := make(chan *status.Response, 1)
	err := client.Status.Poll(id, poll)
	if err != nil {
		// propagate the polling failure instead of silently dropping it
		return err
	}
	status := <-poll
	log.Printf("[DEBUG] status %v", status)
	if status.Failed() {
		return fmt.Errorf("unsuccessful job %v failed with status: %v", id, status.Status)
	}
	return nil
}

func dcGroups(dcname string, client *clc.Client) (map[string]string, error) {
	dc, err := client.DC.Get(dcname)
	if err != nil {
		return nil, fmt.Errorf("Failed fetching datacenter %v: %v", dcname, err)
	}
	_, id := dc.Links.GetID("group")
	m := map[string]string{}
	resp, err := client.Group.Get(id)
	if err != nil {
		return nil, fmt.Errorf("Failed fetching root group %v: %v", id, err)
	}
	m[resp.Name] = resp.ID // top
	m[resp.ID] = resp.ID
	for _, x := range resp.Groups {
		deepGroups(x, &m)
	}
	return m, nil
}

func deepGroups(g group.Groups, m *map[string]string) {
	(*m)[g.Name] = g.ID
	(*m)[g.ID] = g.ID
	for _, sg := range g.Groups {
		deepGroups(sg, m)
	}
}

// resolveGroupByNameOrId takes a reference to a group (either name or guid)
// and returns the guid of the group
func resolveGroupByNameOrId(ref, dc string, client *clc.Client) (string, error) {
	m, err := dcGroups(dc, client)
	if err != nil {
		return "", fmt.Errorf("Failed pulling groups in location %v - %v", dc, err)
	}
	if id, ok := m[ref]; ok {
		return id, nil
	}
	return "", fmt.Errorf("Failed resolving group '%v' in location %v", ref, dc)
}

func stateFromString(st string) server.PowerState {
	switch st {
	case "on", "started":
		return server.On
	case "off", "stopped":
		return server.Off
	case "pause", "paused":
		return server.Pause
	case "reboot":
		return server.Reboot
	case "reset":
		return server.Reset
	case "shutdown":
		return server.ShutDown
	case "start_maintenance":
		return server.StartMaintenance
	case "stop_maintenance":
		return server.StopMaintenance
	}
	return -1
}

func parseCustomFields(d *schema.ResourceData) ([]api.Customfields, error) {
	var fields []api.Customfields
	if v := d.Get("custom_fields"); v != nil {
		for _, v := range v.([]interface{}) {
			m := v.(map[string]interface{})
			f := api.Customfields{
				ID:    m["id"].(string),
				Value: m["value"].(string),
			}
			fields = append(fields, f)
		}
	}
	return fields, nil
}

func parseAdditionalDisks(d *schema.ResourceData) ([]server.Disk, error) {
	// some complexity here: create has a different format than update
	// on-create: { path, sizeGB, type }
	// on-update: { diskId, sizeGB, (path), (type=partitioned) }
	var disks []server.Disk
	if v := d.Get("additional_disks"); v != nil {
		for _, v := range v.([]interface{}) {
			m := v.(map[string]interface{})
			ty := m["type"].(string)
			var pa string
			if nil != m["path"] {
				pa = m["path"].(string)
			}
			sz, err := strconv.Atoi(m["size_gb"].(string))
			if err != nil {
				log.Printf("[WARN] Failed parsing size '%v'. skipping", m["size_gb"])
				return nil, fmt.Errorf("Unable to parse %v as int", m["size_gb"])
			}
			if ty != "raw" && ty != "partitioned" {
				return nil, fmt.Errorf("Expected type of { raw | partitioned }. received %v", ty)
			}
			if ty == "raw" && pa != "" {
				return nil, fmt.Errorf("Path cannot be specified for raw disks")
			}
			disk := server.Disk{
				SizeGB: sz,
				Type:   ty,
			}
			if pa != "" {
				disk.Path = pa
			}
			disks = append(disks, disk)
		}
	}
	return disks, nil
}

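For reference, a minimal provider block matching the schema above. This is a sketch only: the credential values are placeholders, and in practice the CLC_USERNAME / CLC_PASSWORD / CLC_ACCOUNT environment variables read by EnvDefaultFunc make them unnecessary in config.

provider "clc" {
  username = "myuser"     # or set CLC_USERNAME
  password = "mypassword" # or set CLC_PASSWORD
  account  = "ALIAS"      # or set CLC_ACCOUNT
  # url is optional; it can also come from CLC_BASE_URL
}
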
@@ -0,0 +1,41 @@
package clc

import (
	"os"
	"testing"

	"github.com/hashicorp/terraform/helper/schema"
	"github.com/hashicorp/terraform/terraform"
)

var testAccProviders map[string]terraform.ResourceProvider
var testAccProvider *schema.Provider

func init() {
	testAccProvider = Provider().(*schema.Provider)
	testAccProviders = map[string]terraform.ResourceProvider{
		"clc": testAccProvider,
	}
}

func TestProvider(t *testing.T) {
	if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
		t.Fatalf("err: %s", err)
	}
}

func TestProvider_impl(t *testing.T) {
	var _ terraform.ResourceProvider = Provider()
}

func testAccPreCheck(t *testing.T) {
	if v := os.Getenv("CLC_USERNAME"); v == "" {
		t.Fatal("CLC_USERNAME must be set for acceptance tests")
	}
	if v := os.Getenv("CLC_PASSWORD"); v == "" {
		t.Fatal("CLC_PASSWORD must be set for acceptance tests")
	}
	if v := os.Getenv("CLC_ACCOUNT"); v == "" {
		t.Fatal("CLC_ACCOUNT must be set for acceptance tests")
	}
}

@@ -0,0 +1,159 @@
package clc

import (
	"fmt"
	"log"
	"time"

	"github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/api"
	"github.com/CenturyLinkCloud/clc-sdk/group"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceCLCGroup() *schema.Resource {
	return &schema.Resource{
		Create: resourceCLCGroupCreate,
		Read:   resourceCLCGroupRead,
		Update: resourceCLCGroupUpdate,
		Delete: resourceCLCGroupDelete,
		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "",
			},
			"parent": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"location_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"parent_group_id": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"custom_fields": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},
		},
	}
}

func resourceCLCGroupCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	name := d.Get("name").(string)
	desc := d.Get("description").(string)
	parent := d.Get("parent").(string)
	dc := d.Get("location_id").(string)

	// CLC doesn't enforce uniqueness by name,
	// so skip the traditional error we'd raise
	e, _ := resolveGroupByNameOrId(name, dc, client)
	if e != "" {
		log.Printf("[INFO] Resolved existing group: %v => %v", name, e)
		d.SetId(e)
		return nil
	}

	var pgid string
	p, err := resolveGroupByNameOrId(parent, dc, client)
	if p != "" {
		log.Printf("[INFO] Resolved parent group: %v => %v", parent, p)
		pgid = p
	} else {
		return fmt.Errorf("Failed resolving parent group %s - %s err:%s", parent, p, err)
	}

	d.Set("parent_group_id", pgid)
	spec := group.Group{
		Name:          name,
		Description:   desc,
		ParentGroupID: pgid,
	}
	resp, err := client.Group.Create(spec)
	if err != nil {
		return fmt.Errorf("Failed creating group: %s", err)
	}
	log.Println("[INFO] Group created")
	d.SetId(resp.ID)
	return nil
}

func resourceCLCGroupRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	id := d.Id()
	g, err := client.Group.Get(id)
	if err != nil {
		log.Printf("[INFO] Failed finding group: %s - %s. Marking destroyed", id, err)
		d.SetId("")
		return nil
	}
	d.Set("name", g.Name)
	d.Set("description", g.Description)
	d.Set("parent_group_id", g.ParentGroupID())
	return nil
}

func resourceCLCGroupUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	id := d.Id()
	var patches []api.Update

	g, err := client.Group.Get(id)
	if err != nil {
		return fmt.Errorf("Failed fetching group: %v - %v", id, err)
	}

	if delta, orig := d.Get("name").(string), g.Name; delta != orig {
		patches = append(patches, group.UpdateName(delta))
	}
	if delta, orig := d.Get("description").(string), g.Description; delta != orig {
		patches = append(patches, group.UpdateDescription(delta))
	}
	newParent := d.Get("parent").(string)
	pgid, err := resolveGroupByNameOrId(newParent, g.Locationid, client)
	log.Printf("[DEBUG] PARENT current:%v new:%v resolved:%v", g.ParentGroupID(), newParent, pgid)
	if pgid == "" {
		return fmt.Errorf("Unable to resolve parent group %v: %v", newParent, err)
	} else if pgid != g.ParentGroupID() {
		// compare the resolved guid: "parent" may be a name rather than an id
		patches = append(patches, group.UpdateParentGroupID(pgid))
	}

	if len(patches) == 0 {
		return nil
	}
	err = client.Group.Update(id, patches...)
	if err != nil {
		return fmt.Errorf("Failed updating group %v: %v", id, err)
	}
	return resource.Retry(1*time.Minute, func() error {
		_, err := client.Group.Get(id)
		if err == nil {
			return resourceCLCGroupRead(d, meta)
		}
		return &resource.RetryError{Err: err}
	})
}

func resourceCLCGroupDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	id := d.Id()
	log.Printf("[INFO] Deleting group %v", id)
	st, err := client.Group.Delete(id)
	if err != nil {
		return fmt.Errorf("Failed deleting group: %v with err: %v", id, err)
	}
	return waitStatus(client, st.ID)
}

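A usage sketch for the resource above; the resource name and values are illustrative, not from this commit. Note that `parent` may be either a group name or a group id, since it is resolved through resolveGroupByNameOrId.

resource "clc_group" "frontends" {
  location_id = "WA1"
  name        = "frontends"
  description = "web frontends"
  parent      = "Default Group"
}
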
@@ -0,0 +1,148 @@
package clc

import (
	"fmt"
	"testing"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/group"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// things to test:
//   resolves to existing group
//   does not nuke a group w/ no parents (root group)
//   change a name on a group

func TestAccGroupBasic(t *testing.T) {
	var resp group.Response
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckGroupDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccCheckGroupConfigBasic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGroupExists("clc_group.acc_test_group", &resp),
					testAccCheckGroupParent(&resp, "Default Group"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "name", "okcomputer"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "location_id", "WA1"),
				),
			},
			resource.TestStep{
				Config: testAccCheckGroupConfigUpdate,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGroupExists("clc_group.acc_test_group", &resp),
					testAccCheckGroupParent(&resp, "Default Group"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "name", "foobar"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "location_id", "WA1"),
				),
			},
			resource.TestStep{
				Config: testAccCheckGroupConfigReparent,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckGroupExists("clc_group.acc_test_group", &resp),
					testAccCheckGroupParent(&resp, "reparent"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "name", "foobar"),
					resource.TestCheckResourceAttr(
						"clc_group.acc_test_group", "location_id", "WA1"),
				),
			},
		},
	})
}

func testAccCheckGroupDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*clc.Client)
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "clc_group" {
			continue
		}
		_, err := client.Group.Get(rs.Primary.ID)
		if err == nil {
			return fmt.Errorf("Group still exists")
		}
	}
	return nil
}

func testAccCheckGroupParent(resp *group.Response, expectedName string) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		client := testAccProvider.Meta().(*clc.Client)
		ok, l := resp.Links.GetLink("parentGroup")
		if !ok {
			return fmt.Errorf("Missing parent group: %v", resp)
		}
		parent, err := client.Group.Get(l.ID)
		if err != nil {
			return fmt.Errorf("Failed fetching parent %v: %v", l.ID, err)
		}
		if parent.Name != expectedName {
			return fmt.Errorf("Incorrect parent found:'%v' expected:'%v'", parent.Name, expectedName)
		}
		// would be good to test parent but we'd have to make a bunch of calls
		return nil
	}
}

func testAccCheckGroupExists(n string, resp *group.Response) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No Group ID is set")
		}

		client := testAccProvider.Meta().(*clc.Client)
		g, err := client.Group.Get(rs.Primary.ID)
		if err != nil {
			return err
		}

		if g.ID != rs.Primary.ID {
			return fmt.Errorf("Group not found")
		}
		*resp = *g
		return nil
	}
}

const testAccCheckGroupConfigBasic = `
resource "clc_group" "acc_test_group" {
  location_id = "WA1"
  name        = "okcomputer"
  description = "mishaps happening"
  parent      = "Default Group"
}`

const testAccCheckGroupConfigUpdate = `
resource "clc_group" "acc_test_group" {
  location_id = "WA1"
  name        = "foobar"
  description = "update test"
  parent      = "Default Group"
}`

const testAccCheckGroupConfigReparent = `
resource "clc_group" "acc_test_group_reparent" {
  location_id = "WA1"
  name        = "reparent"
  description = "introduce a parent group in place"
  parent      = "Default Group"
}
resource "clc_group" "acc_test_group" {
  location_id = "WA1"
  name        = "foobar"
  description = "update test"
  parent      = "${clc_group.acc_test_group_reparent.id}"
}
`

@@ -0,0 +1,125 @@
package clc

import (
	"fmt"
	"log"
	"time"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/lb"

	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/helper/schema"
)

func resourceCLCLoadBalancer() *schema.Resource {
	return &schema.Resource{
		Create: resourceCLCLoadBalancerCreate,
		Read:   resourceCLCLoadBalancerRead,
		Update: resourceCLCLoadBalancerUpdate,
		Delete: resourceCLCLoadBalancerDelete,
		Schema: map[string]*schema.Schema{
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"data_center": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"description": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// optional
			"status": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "enabled",
			},
			// computed
			"ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

func resourceCLCLoadBalancerCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	name := d.Get("name").(string)
	desc := d.Get("description").(string)
	status := d.Get("status").(string)
	r1 := lb.LoadBalancer{
		Name:        name,
		Description: desc,
		Status:      status,
	}
	l, err := client.LB.Create(dc, r1)
	if err != nil {
		return fmt.Errorf("Failed creating load balancer under %v/%v: %v", dc, name, err)
	}
	d.SetId(l.ID)
	return resource.Retry(1*time.Minute, func() error {
		_, err := client.LB.Get(dc, l.ID)
		if err == nil {
			return resourceCLCLoadBalancerRead(d, meta)
		}
		return &resource.RetryError{Err: err}
	})
}

func resourceCLCLoadBalancerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	id := d.Id()
	resp, err := client.LB.Get(dc, id)
	if err != nil {
		log.Printf("[INFO] Failed finding load balancer %v/%v. Marking destroyed", dc, id)
		d.SetId("")
		return nil
	}
	d.Set("description", resp.Description)
	d.Set("ip_address", resp.IPaddress)
	d.Set("status", resp.Status)
	d.Set("pools", resp.Pools)
	d.Set("links", resp.Links)
	return nil
}

func resourceCLCLoadBalancerUpdate(d *schema.ResourceData, meta interface{}) error {
	update := lb.LoadBalancer{}
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	id := d.Id()

	if d.HasChange("name") {
		update.Name = d.Get("name").(string)
	}
	if d.HasChange("description") {
		update.Description = d.Get("description").(string)
	}
	if d.HasChange("status") {
		update.Status = d.Get("status").(string)
	}
	if update.Name != "" || update.Description != "" || update.Status != "" {
		err := client.LB.Update(dc, id, update)
		if err != nil {
			return fmt.Errorf("Failed updating load balancer under %v/%v: %v", dc, id, err)
		}
	}
	return resourceCLCLoadBalancerRead(d, meta)
}

func resourceCLCLoadBalancerDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	id := d.Id()
	err := client.LB.Delete(dc, id)
	if err != nil {
		return fmt.Errorf("Failed deleting loadbalancer %v: %v", id, err)
	}
	return nil
}

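A usage sketch for the resource above (illustrative name and values). `ip_address` is computed once the balancer is provisioned, so it is read rather than set:

resource "clc_load_balancer" "api" {
  data_center = "WA1"
  name        = "api"
  description = "api load balancer"
  status      = "enabled"
}
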
@@ -0,0 +1,179 @@
package clc

import (
	"fmt"
	"log"
	"strconv"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/lb"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceCLCLoadBalancerPool() *schema.Resource {
	return &schema.Resource{
		Create: resourceCLCLoadBalancerPoolCreate,
		Read:   resourceCLCLoadBalancerPoolRead,
		Update: resourceCLCLoadBalancerPoolUpdate,
		Delete: resourceCLCLoadBalancerPoolDelete,
		Schema: map[string]*schema.Schema{
			// pool args
			"port": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"data_center": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"load_balancer": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"method": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "roundRobin",
			},
			"persistence": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "standard",
			},
			"nodes": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},
		},
	}
}

func resourceCLCLoadBalancerPoolCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	lbid := d.Get("load_balancer").(string)

	s1 := d.Get("method").(string)
	m := lb.LeastConn
	if s1 == string(lb.RoundRobin) {
		m = lb.RoundRobin
	}
	s2 := d.Get("persistence").(string)
	p := lb.Standard
	if s2 == string(lb.Sticky) {
		p = lb.Sticky
	}
	r2 := lb.Pool{
		Port:        d.Get("port").(int),
		Method:      m,
		Persistence: p,
	}
	lbp, err := client.LB.CreatePool(dc, lbid, r2)
	if err != nil {
		return fmt.Errorf("Failed creating pool under %v/%v: %v", dc, lbid, err)
	}
	d.SetId(lbp.ID)
	return resourceCLCLoadBalancerPoolUpdate(d, meta)
}

func resourceCLCLoadBalancerPoolRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	lbid := d.Get("load_balancer").(string)
	id := d.Id()
	pool, err := client.LB.GetPool(dc, lbid, id)
	if err != nil {
		log.Printf("[INFO] Failed fetching pool %v/%v. Marking destroyed", lbid, d.Id())
		d.SetId("")
		return nil
	}
	nodes, err := client.LB.GetAllNodes(dc, lbid, id)
	if err != nil {
		return fmt.Errorf("Failed fetching nodes for pool %v: %v", id, err)
	}
	nodes2 := make([]lb.Node, len(nodes))
	for i, n := range nodes {
		nodes2[i] = *n
	}
	pool.Nodes = nodes2
	d.Set("port", pool.Port)
	d.Set("method", pool.Method)
	d.Set("persistence", pool.Persistence)
	d.Set("nodes", pool.Nodes)
	d.Set("links", pool.Links)
	return nil
}

func resourceCLCLoadBalancerPoolUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	lbid := d.Get("load_balancer").(string)
	id := d.Id()
	pool, err := client.LB.GetPool(dc, lbid, d.Id())
	if err != nil {
		return fmt.Errorf("Failed fetching pool %v: %v", id, err)
	}
	pool.Port = 0 // triggers empty value => omission from POST

	if d.HasChange("method") {
		d.SetPartial("method")
		pool.Method = lb.Method(d.Get("method").(string))
	}
	if d.HasChange("persistence") {
		d.SetPartial("persistence")
		pool.Persistence = lb.Persistence(d.Get("persistence").(string))
	}
	err = client.LB.UpdatePool(dc, lbid, id, *pool)
	if err != nil {
		return fmt.Errorf("Failed updating pool %v: %v", id, err)
	}

	if d.HasChange("nodes") {
		d.SetPartial("nodes")
		nodes, err := parseNodes(d)
		if err != nil {
			return err
		}
		err = client.LB.UpdateNodes(dc, lbid, id, nodes...)
		if err != nil {
			return fmt.Errorf("Failed updating pool nodes %v: %v", id, err)
		}
	}
	return resourceCLCLoadBalancerPoolRead(d, meta)
}

func resourceCLCLoadBalancerPoolDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	dc := d.Get("data_center").(string)
	lbid := d.Get("load_balancer").(string)
	id := d.Id()
	err := client.LB.DeletePool(dc, lbid, id)
	if err != nil {
		return fmt.Errorf("Failed deleting pool %v: %v", id, err)
	}
	return nil
}

func parseNodes(d *schema.ResourceData) ([]lb.Node, error) {
	var nodes []lb.Node
	raw := d.Get("nodes")
	if raw == nil {
		log.Println("[WARN] pool missing nodes")
		return nil, nil
	}
	if arr, ok := raw.([]interface{}); ok {
		for _, v := range arr {
			m := v.(map[string]interface{})
			p, err := strconv.Atoi(m["privatePort"].(string))
			if err != nil {
				log.Printf("[WARN] Failed parsing port '%v'. skipping", m["privatePort"])
				continue
			}
			n := lb.Node{
				Status:      m["status"].(string),
				IPaddress:   m["ipAddress"].(string),
				PrivatePort: p,
			}
			nodes = append(nodes, n)
		}
	} else {
		return nil, fmt.Errorf("Failed parsing nodes from pool spec: %v", raw)
	}
	return nodes, nil
}

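A usage sketch for the pool resource (illustrative; it assumes the clc_load_balancer.api sketch shown earlier). The node map keys mirror exactly what parseNodes reads: status, ipAddress, and privatePort.

resource "clc_load_balancer_pool" "pool_80" {
  data_center   = "WA1"
  load_balancer = "${clc_load_balancer.api.id}"
  port          = 80
  method        = "roundRobin"
  persistence   = "standard"
  nodes {
    status      = "enabled"
    ipAddress   = "10.0.1.5"
    privatePort = 80
  }
}
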
@@ -0,0 +1,113 @@
package clc

import (
	"fmt"
	"testing"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	lb "github.com/CenturyLinkCloud/clc-sdk/lb"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// things to test:
//   basic create/delete
//   update nodes
//   works for 80 and 443 together

func TestAccLBPoolBasic(t *testing.T) {
	var pool lb.Pool
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckLBPDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccCheckLBPConfigBasic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckLBPExists("clc_load_balancer_pool.acc_test_pool", &pool),
					resource.TestCheckResourceAttr("clc_load_balancer_pool.acc_test_pool", "port", "80"),
				),
			},
		},
	})
}

func testAccCheckLBPDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*clc.Client)
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "clc_load_balancer_pool" {
			continue
		}
		lbid := rs.Primary.Attributes["load_balancer"]
		_, err := client.LB.GetPool(testAccDC, lbid, rs.Primary.ID)
		if err == nil {
			return fmt.Errorf("LB still exists")
		}
	}
	return nil
}

func testAccCheckLBPExists(n string, resp *lb.Pool) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}
		lbid := rs.Primary.Attributes["load_balancer"]
		client := testAccProvider.Meta().(*clc.Client)
		p, err := client.LB.GetPool(testAccDC, lbid, rs.Primary.ID)
		if err != nil {
			return err
		}
		if p.ID != rs.Primary.ID {
			return fmt.Errorf("Pool not found")
		}
		*resp = *p
		return nil
	}
}

const testAccCheckLBPConfigBasic = `
resource "clc_group" "acc_test_lbp_group" {
  location_id = "WA1"
  name        = "acc_test_lbp_group"
  parent      = "Default Group"
}

# need a server here because we need to reference an ip owned by this account
resource "clc_server" "acc_test_lbp_server" {
  name_template    = "node"
  description      = "load balanced node"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  type             = "standard"
  group_id         = "${clc_group.acc_test_lbp_group.id}"
  cpu              = 1
  memory_mb        = 1024
  password         = "Green123$"
  power_state      = "started"
}

resource "clc_load_balancer" "acc_test_lbp" {
  data_center = "WA1"
  name        = "acc_test_lb"
  description = "load balancer test"
  status      = "enabled"
}

resource "clc_load_balancer_pool" "acc_test_pool" {
  port          = 80
  data_center   = "WA1"
  load_balancer = "${clc_load_balancer.acc_test_lbp.id}"
  nodes {
    status      = "enabled"
    ipAddress   = "${clc_server.acc_test_lbp_server.private_ip_address}"
    privatePort = 80
  }
}
`

@@ -0,0 +1,100 @@
package clc

import (
	"fmt"
	"testing"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	lb "github.com/CenturyLinkCloud/clc-sdk/lb"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// things to test:
//   updates name/desc
//   toggles status
//   created w/o pool

const testAccDC = "WA1"

func TestAccLoadBalancerBasic(t *testing.T) {
	var resp lb.LoadBalancer
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckLBDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccCheckLBConfigBasic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckLBExists("clc_load_balancer.acc_test_lb", &resp),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "name", "acc_test_lb"),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "data_center", testAccDC),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "status", "enabled"),
				),
			},
			// update simple attrs
			resource.TestStep{
				Config: testAccCheckLBConfigNameDesc,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckLBExists("clc_load_balancer.acc_test_lb", &resp),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "name", "foobar"),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "description", "foobar"),
					resource.TestCheckResourceAttr("clc_load_balancer.acc_test_lb", "status", "disabled"),
				),
			},
		},
	})
}

func testAccCheckLBDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*clc.Client)
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "clc_load_balancer" {
			continue
		}
		_, err := client.LB.Get(testAccDC, rs.Primary.ID)
		if err == nil {
			return fmt.Errorf("LB still exists")
		}
	}
	return nil
}

func testAccCheckLBExists(n string, resp *lb.LoadBalancer) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No ID is set")
		}
		client := testAccProvider.Meta().(*clc.Client)
		l, err := client.LB.Get(testAccDC, rs.Primary.ID)
		if err != nil {
			return err
		}
		if l.ID != rs.Primary.ID {
			return fmt.Errorf("LB not found")
		}
		*resp = *l
		return nil
	}
}

const testAccCheckLBConfigBasic = `
resource "clc_load_balancer" "acc_test_lb" {
  data_center = "WA1"
  name        = "acc_test_lb"
  description = "load balancer test"
  status      = "enabled"
}`

const testAccCheckLBConfigNameDesc = `
resource "clc_load_balancer" "acc_test_lb" {
  data_center = "WA1"
  name        = "foobar"
  description = "foobar"
  status      = "disabled"
}`

@@ -0,0 +1,193 @@
package clc

import (
	"fmt"
	"log"
	"strconv"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/server"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceCLCPublicIP() *schema.Resource {
	return &schema.Resource{
		Create: resourceCLCPublicIPCreate,
		Read:   resourceCLCPublicIPRead,
		Update: resourceCLCPublicIPUpdate,
		Delete: resourceCLCPublicIPDelete,
		Schema: map[string]*schema.Schema{
			"server_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
				ForceNew: true,
			},
			"internal_ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Default:  nil,
			},
			"ports": &schema.Schema{
				Type:     schema.TypeList,
				Required: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},
			"source_restrictions": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},
		},
	}
}

func resourceCLCPublicIPCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	sid := d.Get("server_id").(string)
	priv := d.Get("internal_ip_address").(string)
	ports, sources := parseIPSpec(d)
	req := server.PublicIP{
		Ports:              *ports,
		SourceRestrictions: *sources,
	}

	// since the API doesn't tell us the public IP it allocated,
	// track what was added after the call.
	ips := make(map[string]string)
	prev, err := client.Server.Get(sid)
	if err != nil {
		return fmt.Errorf("Failed finding server %v: %v", sid, err)
	}
	for _, i := range prev.Details.IPaddresses {
		ips[i.Internal] = i.Public
	}

	if priv != "" {
		// use existing private ip
		if _, present := ips[priv]; !present {
			return fmt.Errorf("Failed finding internal ip to use %v", priv)
		}
		req.InternalIP = priv
	}
	// execute the request
	resp, err := client.Server.AddPublicIP(sid, req)
	if err != nil {
		return fmt.Errorf("Failed reserving public ip: %v", err)
	}
	err = waitStatus(client, resp.ID)
	if err != nil {
		return err
	}

	srv, err := client.Server.Get(sid)
	if err != nil {
		return fmt.Errorf("Failed refreshing server for public ip: %v", err)
	}
	for _, i := range srv.Details.IPaddresses {
		if priv != "" && i.Internal == priv {
			// bound to the requested internal ip
			log.Printf("[DEBUG] Public IP bound on existing internal:%v - %v", i.Internal, i.Public)
			d.SetId(i.Public)
			break
		} else if ips[i.Internal] == "" && i.Public != "" {
			// a new public/private mapping was allocated
			log.Printf("[DEBUG] Public IP allocated on new internal:%v - %v", i.Internal, i.Public)
			d.SetId(i.Public)
			break
		}
	}
	return resourceCLCPublicIPRead(d, meta)
}

func resourceCLCPublicIPRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	pip := d.Id()
	s := d.Get("server_id").(string)
	resp, err := client.Server.GetPublicIP(s, pip)
	if err != nil {
		log.Printf("[INFO] Failed finding public ip: %v. Marking destroyed", d.Id())
		d.SetId("")
		return nil
	}

	d.Set("internal_ip_address", resp.InternalIP)
	d.Set("ports", resp.Ports)
	d.Set("source_restrictions", resp.SourceRestrictions)
	return nil
}

func resourceCLCPublicIPUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	ip := d.Id()
	sid := d.Get("server_id").(string)
	if d.HasChange("ports") || d.HasChange("source_restrictions") {
		ports, sources := parseIPSpec(d)
		req := server.PublicIP{
			Ports:              *ports,
			SourceRestrictions: *sources,
		}
		resp, err := client.Server.UpdatePublicIP(sid, ip, req)
		if err != nil {
			return fmt.Errorf("Failed updating public ip: %v", err)
		}
		err = waitStatus(client, resp.ID)
		if err != nil {
			return err
		}
		log.Printf("[INFO] Successfully updated %v with %v", ip, req)
	}
	return nil
}

func resourceCLCPublicIPDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	s := d.Get("server_id").(string)
	ip := d.Id()
	log.Printf("[INFO] Deleting public ip %v", ip)
	resp, err := client.Server.DeletePublicIP(s, ip)
	if err != nil {
		return fmt.Errorf("Failed deleting public ip: %v", err)
	}
	err = waitStatus(client, resp.ID)
	if err != nil {
		return err
	}
	log.Printf("[INFO] Public IP successfully deleted: %v", ip)
	return nil
}

func parseIPSpec(d *schema.ResourceData) (*[]server.Port, *[]server.SourceRestriction) {
	var ports []server.Port
	var sources []server.SourceRestriction
	if v := d.Get("ports"); v != nil {
		for _, v := range v.([]interface{}) {
			m := v.(map[string]interface{})
			p := server.Port{}
			port, err := strconv.Atoi(m["port"].(string))
			if err != nil {
				log.Printf("[WARN] Failed parsing port '%v'. skipping", m["port"])
				continue
			}
			p.Protocol = m["protocol"].(string)
			p.Port = port
			if to := m["port_to"]; to != nil {
				through, _ := strconv.Atoi(to.(string))
				log.Printf("[DEBUG] port range: %v-%v", port, through)
				p.PortTo = through
			}
			ports = append(ports, p)
		}
	}
	if v := d.Get("source_restrictions"); v != nil {
		for _, v := range v.([]interface{}) {
			m := v.(map[string]interface{})
			r := server.SourceRestriction{}
			r.CIDR = m["cidr"].(string)
			sources = append(sources, r)
		}
	}
	return &ports, &sources
}

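A usage sketch (illustrative; it assumes a hypothetical clc_server.web resource). Per parseIPSpec above, each ports map takes a protocol and port with an optional port_to for ranges, and each source_restrictions map takes a cidr:

resource "clc_public_ip" "web_ip" {
  server_id           = "${clc_server.web.id}"
  internal_ip_address = "${clc_server.web.private_ip_address}"
  ports {
    protocol = "TCP"
    port     = 80
  }
  source_restrictions {
    cidr = "10.0.0.0/8"
  }
}
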
@@ -0,0 +1,158 @@
package clc

import (
	"fmt"
	"testing"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/server"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// things to test:
//   maps to internal specified ip
//   port range
//   update existing rule
//   CIDR restriction

func TestAccPublicIPBasic(t *testing.T) {
	var resp server.PublicIP
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckPublicIPDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccCheckPublicIPConfigBasic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckPublicIPExists("clc_public_ip.acc_test_public_ip", &resp),
					testAccCheckPublicIPNIC("clc_public_ip.acc_test_public_ip", &resp),
					testAccCheckPublicIPPortRange("clc_public_ip.acc_test_public_ip", &resp),
					testAccCheckPublicIPBlockCIDR("clc_public_ip.acc_test_public_ip", &resp),
					//testAccCheckPublicIPUpdated("clc_public_ip.eip", &resp),
				),
			},
		},
	})
}

func testAccCheckPublicIPDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*clc.Client)
	for _, rs := range s.RootModule().Resources {
		if rs.Type != "clc_public_ip" {
			continue
		}
		sid := rs.Primary.Attributes["server_id"]
		_, err := client.Server.GetPublicIP(sid, rs.Primary.ID)
		if err == nil {
			return fmt.Errorf("IP still exists")
		}
	}
	return nil
}

func testAccCheckPublicIPExists(n string, resp *server.PublicIP) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No PublicIP ID is set")
		}
		client := testAccProvider.Meta().(*clc.Client)
		sid := rs.Primary.Attributes["server_id"]
		p, err := client.Server.GetPublicIP(sid, rs.Primary.ID)
		if err != nil {
			return err
		}
		*resp = *p
		return nil
	}
}

func testAccCheckPublicIPPortRange(n string, resp *server.PublicIP) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// check the passed port range made it through
		var spec server.Port
		for _, p := range resp.Ports {
			if p.Protocol == "UDP" {
				spec = p
				break
			}
		}
		if spec.Port != 53 || spec.PortTo != 55 {
			return fmt.Errorf("Expected udp ports from 53-55 but found: %v", spec)
		}
		return nil
	}
}

func testAccCheckPublicIPBlockCIDR(n string, resp *server.PublicIP) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		// check the passed source restriction made it through
		spec := resp.SourceRestrictions[0]
		if spec.CIDR != "108.19.67.15/32" {
			return fmt.Errorf("Expected cidr restriction but found: %v", spec)
		}
		return nil
	}
}

func testAccCheckPublicIPNIC(n string, resp *server.PublicIP) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		sid := rs.Primary.Attributes["server_id"]
		nic := rs.Primary.Attributes["internal_ip_address"]

		client := testAccProvider.Meta().(*clc.Client)
		srv, err := client.Server.Get(sid)
		if err != nil {
			return fmt.Errorf("Failed fetching server: %v", err)
		}
		first := srv.Details.IPaddresses[0].Internal
		if nic != first {
			return fmt.Errorf("Expected public ip to be mapped to %s but found: %s", first, nic)
		}
		return nil
	}
}

var testAccCheckPublicIPConfigBasic = `
resource "clc_group" "acc_test_group_ip" {
  location_id = "CA1"
  name        = "acc_test_group_ip"
  parent      = "Default Group"
}

resource "clc_server" "acc_test_server" {
  name_template    = "test"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id         = "${clc_group.acc_test_group_ip.id}"
  cpu              = 1
  memory_mb        = 1024
  password         = "Green123$"
}

resource "clc_public_ip" "acc_test_public_ip" {
  server_id           = "${clc_server.acc_test_server.id}"
  internal_ip_address = "${clc_server.acc_test_server.private_ip_address}"
  source_restrictions {
    cidr = "108.19.67.15/32"
  }
  ports {
    protocol = "TCP"
    port     = 80
  }
  ports {
    protocol = "UDP"
    port     = 53
    port_to  = 55
  }
}
`

@@ -0,0 +1,327 @@
package clc

import (
	"fmt"
	"log"
	"strings"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/api"
	"github.com/CenturyLinkCloud/clc-sdk/server"
	"github.com/CenturyLinkCloud/clc-sdk/status"

	"github.com/hashicorp/terraform/helper/schema"
)

func resourceCLCServer() *schema.Resource {
	return &schema.Resource{
		Create: resourceCLCServerCreate,
		Read:   resourceCLCServerRead,
		Update: resourceCLCServerUpdate,
		Delete: resourceCLCServerDelete,
		Schema: map[string]*schema.Schema{
			"name_template": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"name": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"group_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"source_server_id": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			"cpu": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"memory_mb": &schema.Schema{
				Type:     schema.TypeInt,
				Required: true,
			},
			"password": &schema.Schema{
				Type:     schema.TypeString,
				Required: true,
			},
			// optional
			"description": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "",
			},
			"type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "standard",
			},
			"network_id": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
			},
			"custom_fields": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},
			"additional_disks": &schema.Schema{
				Type:     schema.TypeList,
				Optional: true,
				Elem:     &schema.Schema{Type: schema.TypeMap},
			},

			// optional: misc state storage. non-CLC field
			"metadata": &schema.Schema{
				Type:     schema.TypeMap,
				Optional: true,
			},

			// optional
			"storage_type": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Default:  "standard",
			},

			// sorta computed
			"private_ip_address": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Default:  nil,
			},
			"power_state": &schema.Schema{
				Type:     schema.TypeString,
				Optional: true,
				Computed: true,
				Default:  nil,
			},

			// computed
			"created_date": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"modified_date": &schema.Schema{
				Type:     schema.TypeString,
				Computed: true,
			},
			"public_ip_address": &schema.Schema{
				// RO: if a public_ip is on this server, populate it
				Type:     schema.TypeString,
				Computed: true,
			},
		},
	}
}

func resourceCLCServerCreate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	spec := server.Server{
		Name:           d.Get("name_template").(string),
		Password:       d.Get("password").(string),
		Description:    d.Get("description").(string),
		GroupID:        d.Get("group_id").(string),
		CPU:            d.Get("cpu").(int),
		MemoryGB:       d.Get("memory_mb").(int) / 1024,
		SourceServerID: d.Get("source_server_id").(string),
		Type:           d.Get("type").(string),
		IPaddress:      d.Get("private_ip_address").(string),
		NetworkID:      d.Get("network_id").(string),
		Storagetype:    d.Get("storage_type").(string),
	}

	disks, err := parseAdditionalDisks(d)
	if err != nil {
		return fmt.Errorf("Failed parsing disks: %v", err)
	}
	spec.Additionaldisks = disks
	fields, err := parseCustomFields(d)
	if err != nil {
		return fmt.Errorf("Failed setting customfields: %v", err)
	}
	spec.Customfields = fields

	resp, err := client.Server.Create(spec)
	if err != nil || !resp.IsQueued {
		return fmt.Errorf("Failed creating server: %v", err)
	}
	// server's UUID returned under rel=self link
	_, uuid := resp.Links.GetID("self")

	ok, st := resp.GetStatusID()
	if !ok {
		return fmt.Errorf("Failed extracting status to poll on %v: %v", resp, err)
	}
	err = waitStatus(client, st)
	if err != nil {
		return err
	}

	s, err := client.Server.Get(uuid)
	if err != nil {
		return fmt.Errorf("Failed fetching created server %v: %v", uuid, err)
	}
	d.SetId(strings.ToUpper(s.Name))
	log.Printf("[INFO] Server created. id: %v", s.Name)
	return resourceCLCServerRead(d, meta)
}

func resourceCLCServerRead(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	s, err := client.Server.Get(d.Id())
	if err != nil {
		log.Printf("[INFO] Failed finding server: %v. Marking destroyed", d.Id())
		d.SetId("")
		return nil
	}
	if len(s.Details.IPaddresses) > 0 {
		d.Set("private_ip_address", s.Details.IPaddresses[0].Internal)
		if s.Details.IPaddresses[0].Public != "" {
			d.Set("public_ip_address", s.Details.IPaddresses[0].Public)
		}
	}

	d.Set("name", s.Name)
	d.Set("group_id", s.GroupID)
	d.Set("status", s.Status)
	d.Set("power_state", s.Details.Powerstate)
	d.Set("cpu", s.Details.CPU)
	d.Set("memory_mb", s.Details.MemoryMB)
	d.Set("disk_gb", s.Details.Storagegb)
	d.Set("storage_type", s.Storagetype)
	d.Set("created_date", s.ChangeInfo.CreatedDate)
	d.Set("modified_date", s.ChangeInfo.ModifiedDate)
	return nil
}

func resourceCLCServerUpdate(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	id := d.Id()

	var edits []api.Update
	var updates []api.Update
	var i int

	poll := make(chan *status.Response, 1)
	d.Partial(true)
	s, err := client.Server.Get(id)
	if err != nil {
		return fmt.Errorf("Failed fetching server: %v - %v", d.Id(), err)
	}
	// edits happen synchronously
	if delta, orig := d.Get("description").(string), s.Description; delta != orig {
		d.SetPartial("description")
		edits = append(edits, server.UpdateDescription(delta))
	}
	if delta, orig := d.Get("group_id").(string), s.GroupID; delta != orig {
		d.SetPartial("group_id")
		edits = append(edits, server.UpdateGroup(delta))
	}
	if len(edits) > 0 {
		err = client.Server.Edit(id, edits...)
		if err != nil {
			return fmt.Errorf("Failed saving edits: %v", err)
		}
	}
	// updates are queue processed
	if d.HasChange("password") {
		d.SetPartial("password")
		o, _ := d.GetChange("password")
		old := o.(string)
		pass := d.Get("password").(string)
		updates = append(updates, server.UpdateCredentials(old, pass))
	}
	if i = d.Get("cpu").(int); i != s.Details.CPU {
		d.SetPartial("cpu")
		updates = append(updates, server.UpdateCPU(i))
	}
	if i = d.Get("memory_mb").(int); i != s.Details.MemoryMB {
		d.SetPartial("memory_mb")
		updates = append(updates, server.UpdateMemory(i/1024)) // takes GB
	}

	if d.HasChange("custom_fields") {
		d.SetPartial("custom_fields")
		fields, err := parseCustomFields(d)
		if err != nil {
			return fmt.Errorf("Failed setting customfields: %v", err)
		}
		updates = append(updates, server.UpdateCustomfields(fields))
	}
	if d.HasChange("additional_disks") {
		d.SetPartial("additional_disks")
		disks, err := parseAdditionalDisks(d)
		if err != nil {
			return fmt.Errorf("Failed parsing disks: %v", err)
		}
		updates = append(updates, server.UpdateAdditionaldisks(disks))
	}

	if len(updates) > 0 {
		resp, err := client.Server.Update(id, updates...)
		if err != nil {
			return fmt.Errorf("Failed saving updates: %v", err)
		}

		err = client.Status.Poll(resp.ID, poll)
		if err != nil {
			return err
		}
		status := <-poll
		if status.Failed() {
			return fmt.Errorf("Update failed")
		}
		log.Printf("[INFO] Server updated! status: %v", status.Status)
	}

	if d.HasChange("power_state") {
		st := d.Get("power_state").(string)
		log.Printf("[DEBUG] POWER: %v => %v", s.Details.Powerstate, st)
		newst := stateFromString(st)
		servers, err := client.Server.PowerState(newst, s.Name)
		if err != nil {
			return fmt.Errorf("Failed setting power state to: %v", newst)
		}
		ok, id := servers[0].GetStatusID()
		if !ok {
			return fmt.Errorf("Failed extracting power state queue status from: %v", servers[0])
		}
		err = client.Status.Poll(id, poll)
		if err != nil {
			return err
		}
		status := <-poll
		if status.Failed() {
			return fmt.Errorf("Update failed")
		}
		log.Printf("[INFO] state updated: %v", status)
	}

	d.Partial(false)
	return nil
}

func resourceCLCServerDelete(d *schema.ResourceData, meta interface{}) error {
	client := meta.(*clc.Client)
	id := d.Id()
	resp, err := client.Server.Delete(id)
	if err != nil || !resp.IsQueued {
		return fmt.Errorf("Failed queueing delete of %v - %v", id, err)
	}

	ok, st := resp.GetStatusID()
	if !ok {
		return fmt.Errorf("Failed extracting status to poll on %v: %v", resp, err)
	}
	err = waitStatus(client, st)
	if err != nil {
		return err
	}
	log.Printf("[INFO] Server successfully deleted: %v", st)
	return nil
}

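A usage sketch for the server resource (illustrative values; the group reference assumes the clc_group.frontends sketch earlier, and the password is borrowed from the test configs). Note memory is specified in MB and converted to GB for the CLC API, and power_state accepts the states handled by stateFromString ("started", "stopped", "paused", etc.):

resource "clc_server" "web" {
  name_template    = "web"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id         = "${clc_group.frontends.id}"
  cpu              = 2
  memory_mb        = 2048
  password         = "Green123$"
  power_state      = "started"
}
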
@ -0,0 +1,256 @@
package clc

import (
	"fmt"
	"strings"
	"testing"

	clc "github.com/CenturyLinkCloud/clc-sdk"
	"github.com/CenturyLinkCloud/clc-sdk/server"
	"github.com/hashicorp/terraform/helper/resource"
	"github.com/hashicorp/terraform/terraform"
)

// things to test:
//   basic crud
//   modify specs
//   power operations
//   add'l disks
//   custom fields? (skip)

func TestAccServerBasic(t *testing.T) {
	var resp server.Response
	resource.Test(t, resource.TestCase{
		PreCheck:     func() { testAccPreCheck(t) },
		Providers:    testAccProviders,
		CheckDestroy: testAccCheckServerDestroy,
		Steps: []resource.TestStep{
			resource.TestStep{
				Config: testAccCheckServerConfigBasic,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckServerExists("clc_server.acc_test_server", &resp),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "name_template", "test"),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "cpu", "1"),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "memory_mb", "1024"),
				),
			},
			// update simple attrs
			resource.TestStep{
				Config: testAccCheckServerConfigCPUMEM,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckServerExists("clc_server.acc_test_server", &resp),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "cpu", "2"),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "memory_mb", "2048"),
					testAccCheckServerUpdatedSpec("clc_server.acc_test_server", &resp),
				),
			},
			// toggle power
			resource.TestStep{
				Config: testAccCheckServerConfigPower,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckServerExists("clc_server.acc_test_server", &resp),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "power_state", "stopped"),
				),
			},
			/* // currently broken since disk updates require diskId
			// add disks
			resource.TestStep{
				Config: testAccCheckServerConfigDisks,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckServerExists("clc_server.acc_test_server", &resp),
					// power still off
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "power_state", "stopped"),
					testAccCheckServerUpdatedDisks("clc_server.acc_test_server", &resp),
				),
			},
			*/
			/* // broken since network id is an (account-specific) guid
			// set network id
			resource.TestStep{
				Config: testAccCheckServerConfigNetwork,
				Check: resource.ComposeTestCheckFunc(
					testAccCheckServerExists("clc_server.acc_test_server", &resp),
					resource.TestCheckResourceAttr(
						"clc_server.acc_test_server", "network_id", "15a0f669c332435ebf375e010ac79fbb"),
					testAccCheckServerUpdatedSpec("clc_server.acc_test_server", &resp),
				),
			},
			*/
		},
	})
}

func testAccCheckServerDestroy(s *terraform.State) error {
	client := testAccProvider.Meta().(*clc.Client)

	for _, rs := range s.RootModule().Resources {
		if rs.Type != "clc_server" {
			continue
		}

		_, err := client.Server.Get(rs.Primary.ID)
		if err == nil {
			return fmt.Errorf("Server still exists")
		}
	}

	return nil
}

func testAccCheckServerExists(n string, resp *server.Response) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		if rs.Primary.ID == "" {
			return fmt.Errorf("No server ID is set")
		}

		client := testAccProvider.Meta().(*clc.Client)
		srv, err := client.Server.Get(rs.Primary.ID)
		if err != nil {
			return err
		}

		if strings.ToUpper(srv.ID) != rs.Primary.ID {
			return fmt.Errorf("Server not found")
		}
		*resp = *srv
		return nil
	}
}

func testAccCheckServerUpdatedSpec(n string, resp *server.Response) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		client := testAccProvider.Meta().(*clc.Client)
		srv, err := client.Server.Get(rs.Primary.ID)
		if err != nil {
			return err
		}
		cpu := srv.Details.CPU
		mem := srv.Details.MemoryMB
		scpu := fmt.Sprintf("%v", cpu)
		smem := fmt.Sprintf("%v", mem)
		excpu := rs.Primary.Attributes["cpu"]
		exmem := rs.Primary.Attributes["memory_mb"]
		if scpu != excpu {
			return fmt.Errorf("Expected CPU to be %v but found %v", excpu, scpu)
		}
		if smem != exmem {
			return fmt.Errorf("Expected MEM to be %v but found %v", exmem, smem)
		}
		return nil
	}
}

func testAccCheckServerUpdatedDisks(n string, resp *server.Response) resource.TestCheckFunc {
	return func(s *terraform.State) error {
		rs, ok := s.RootModule().Resources[n]
		if !ok {
			return fmt.Errorf("Not found: %s", n)
		}
		client := testAccProvider.Meta().(*clc.Client)
		srv, err := client.Server.Get(rs.Primary.ID)
		if err != nil {
			return err
		}

		if len(srv.Details.Disks) <= 3 {
			return fmt.Errorf("Expected total of > 3 drives. found: %v", len(srv.Details.Disks))
		}

		return nil
	}
}

const testAccCheckServerConfigBasic = `
resource "clc_group" "acc_test_group_server" {
  location_id = "WA1"
  name = "acc_test_group_server"
  parent = "Default Group"
}

resource "clc_server" "acc_test_server" {
  name_template = "test"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.acc_test_group_server.id}"
  cpu = 1
  memory_mb = 1024
  password = "Green123$"
}
`

const testAccCheckServerConfigCPUMEM = `
resource "clc_group" "acc_test_group_server" {
  location_id = "WA1"
  name = "acc_test_group_server"
  parent = "Default Group"
}

resource "clc_server" "acc_test_server" {
  name_template = "test"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.acc_test_group_server.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"
  power_state = "started"
}
`

const testAccCheckServerConfigPower = `
resource "clc_group" "acc_test_group_server" {
  location_id = "WA1"
  name = "acc_test_group_server"
  parent = "Default Group"
}

resource "clc_server" "acc_test_server" {
  name_template = "test"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.acc_test_group_server.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"
  power_state = "stopped"
}
`

const testAccCheckServerConfigDisks = `
resource "clc_group" "acc_test_group_server" {
  location_id = "WA1"
  name = "acc_test_group_server"
  parent = "Default Group"
}

resource "clc_server" "acc_test_server" {
  name_template = "test"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.acc_test_group_server.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"
  power_state = "stopped"
  # network_id = "15a0f669c332435ebf375e010ac79fbb"
  additional_disks
  {
    path = "/data1"
    size_gb = 100
    type = "partitioned"
  }
}
`

@ -59,6 +59,10 @@
			"ImportPath": "github.com/DreamItGetIT/statuscake",
			"Rev": "8cbe86575f00210a6df2c19cb2f59b00cd181de3"
		},
		{
			"ImportPath": "github.com/CenturyLinkCloud/clc-sdk",
			"Rev": "74abd779894192c559ad29f0183a1716370490ad"
		},
		{
			"ImportPath": "github.com/apparentlymart/go-cidr/cidr",
			"Rev": "a3ebdb999b831ecb6ab8a226e31b07b2b9061c47"

@ -0,0 +1,9 @@
## Creating a standard web application

This example provides a sample configuration for creating the infrastructure for a basic web application.

For the CLC provider, set up your CLC environment as outlined in https://www.terraform.io/docs/providers/clc/index.html

Once ready, run `terraform plan` to review.

Once satisfied with the plan, run `terraform apply`.

@ -0,0 +1,96 @@
# --------------------
# Credentials
provider "clc" {
  username = "${var.clc_username}"
  password = "${var.clc_password}"
  account = "${var.clc_account}"
}

# --------------------
# Provision/Resolve a server group
resource "clc_group" "frontends" {
  location_id = "CA1"
  name = "frontends"
  parent = "Default Group"
}

# --------------------
# Provision a server
resource "clc_server" "node" {
  name_template = "trusty"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.frontends.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"
  additional_disks
  {
    path = "/var"
    size_gb = 100
    type = "partitioned"
  }
  additional_disks
  {
    size_gb = 10
    type = "raw"
  }
}

# --------------------
# Provision a public ip
resource "clc_public_ip" "backdoor" {
  server_id = "${clc_server.node.0.id}"
  internal_ip_address = "${clc_server.node.0.private_ip_address}"
  ports
  {
    protocol = "ICMP"
    port = -1
  }
  ports
  {
    protocol = "TCP"
    port = 22
  }
  source_restrictions
  { cidr = "173.60.0.0/16" }

  # ssh in and start a simple http server on :8000
  provisioner "remote-exec" {
    inline = [
      "cd /tmp; python -mSimpleHTTPServer > /dev/null 2>&1 &"
    ]
    connection {
      host = "${clc_public_ip.backdoor.id}"
      user = "root"
      password = "${clc_server.node.password}"
    }
  }
}

# --------------------
# Provision a load balancer
resource "clc_load_balancer" "frontdoor" {
  data_center = "${clc_group.frontends.location_id}"
  name = "frontdoor"
  description = "frontdoor"
  status = "enabled"
}

# --------------------
# Provision a load balancer pool
resource "clc_load_balancer_pool" "pool" {
  data_center = "${clc_group.frontends.location_id}"
  load_balancer = "${clc_load_balancer.frontdoor.id}"
  method = "roundRobin"
  persistence = "standard"
  port = 80
  nodes
  {
    status = "enabled"
    ipAddress = "${clc_server.node.private_ip_address}"
    privatePort = 8000
  }
}

@ -0,0 +1,27 @@
output "group_id" {
  value = "${clc_group.frontends.id}"
}

output "node_id" {
  value = "${clc_server.node.id}"
}

output "node_ip" {
  value = "${clc_server.node.private_ip_address}"
}

output "node_password" {
  value = "${clc_server.node.password}"
}

output "backdoor" {
  value = "${clc_public_ip.backdoor.id}"
}

output "frontdoor" {
  value = "${clc_load_balancer.frontdoor.ip_address}"
}

output "pool" {
  value = "curl -vv ${clc_load_balancer.frontdoor.ip_address}"
}

@ -0,0 +1,19 @@
variable "clc_username" {
  default = "<username>"
}
variable "clc_password" {
  default = "<password>"
}

variable "clc_account" {
  default = "<alias>"
}

# Ubuntu 14.04
variable "image" {
  default = "ubuntu-14-64-template"
}

variable "app_port" {
  default = 8080
}

@ -11,6 +11,7 @@ body.layout-aws,
body.layout-azure,
body.layout-chef,
body.layout-azurerm,
body.layout-clc,
body.layout-cloudflare,
body.layout-cloudstack,
body.layout-consul,

@ -0,0 +1,59 @@
---
layout: "clc"
page_title: "Provider: CenturyLinkCloud"
sidebar_current: "docs-clc-index"
description: |-
  The CenturyLinkCloud provider is used to interact with the many resources
  supported by CLC. The provider needs to be configured with account
  credentials before it can be used.
---

# CLC Provider

The clc provider is used to interact with the many resources supported
by CenturyLinkCloud. The provider needs to be configured with account
credentials before it can be used.

Use the navigation to the left to read about the available resources.

For additional documentation, see the [CLC Developer Center](https://www.ctl.io/developers/).

## Example Usage

```
# Configure the CLC Provider
provider "clc" {
  username = "${var.clc_username}"
  password = "${var.clc_password}"
  account = "${var.clc_account}"
}

# Create a server
resource "clc_server" "node" {
  ...
}
```

## Account Bootstrap

Trial accounts are available by signing up on the control portal: [https://control.ctl.io](https://control.ctl.io).

For new accounts, you should initially run these steps manually:

- [Create a network.](https://control.ctl.io/Network/network)
- [Provision a server.](https://control.ctl.io/create)

## Argument Reference

The following arguments are supported:

* `username` - (Required) This is the CLC account username. It must be provided, but
  it can also be sourced from the `CLC_USERNAME` environment variable.

* `password` - (Required) This is the CLC account password. It must be provided, but
  it can also be sourced from the `CLC_PASSWORD` environment variable.

* `account` - (Required) This is the CLC account alias. It must be provided, but
  it can also be sourced from the `CLC_ACCOUNT` environment variable.
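
Since each credential can come from its environment variable, a provider
block with no inline credentials should also work. A minimal sketch,
assuming `CLC_USERNAME`, `CLC_PASSWORD`, and `CLC_ACCOUNT` are already
exported in the shell:

```
# Credentials picked up from CLC_USERNAME, CLC_PASSWORD, CLC_ACCOUNT
provider "clc" {}
```
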
@ -0,0 +1,55 @@
---
layout: "clc"
page_title: "clc: clc_group"
sidebar_current: "docs-clc-resource-group"
description: |-
  Manages a CLC server group.
---

# clc\_group

Manages a CLC server group. Either provisions or resolves to an existing group.

See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#groups).

## Example Usage

```
# Provision/Resolve a server group
resource "clc_group" "frontends" {
  location_id = "WA1"
  name = "frontends"
  parent = "Default Group"
}

output "group_id" {
  value = "${clc_group.frontends.id}"
}
```

## Argument Reference

The following arguments are supported:

* `name` - (Required, string) The name (or GUID) of this server group. Will resolve to an existing group if present.
* `parent` - (Required, string) The name or ID of the parent group. Will error if absent or unable to resolve.
* `location_id` - (Required, string) The datacenter location of both the parent group and this group.
  Examples: "WA1", "VA1"
* `description` - (Optional, string) Description for the server group (visible in the control portal only).
* `custom_fields` - (Optional) See [CustomFields](#custom_fields) below for details.

<a id="custom_fields"></a>
## CustomFields

`custom_fields` is a block within the configuration that may be
repeated to bind custom fields for a server. Custom fields need to be set
up in advance. Each `custom_fields` block supports the following
(see the sketch below):

* `id` - (Required, string) The ID of the custom field to set.
* `value` - (Required, string) The value for the specified field.
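
For illustration, a sketch of a group binding a custom field. The field
ID below is a hypothetical placeholder; real IDs are account-specific
and must already exist:

```
resource "clc_group" "frontends" {
  location_id = "WA1"
  name = "frontends"
  parent = "Default Group"

  # hypothetical custom field ID; substitute one defined on your account
  custom_fields
  {
    id = "22f002123e3b46d9a8b38ecd4c6df7f9"
    value = "terraform-managed"
  }
}
```
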
@ -0,0 +1,45 @@
---
layout: "clc"
page_title: "clc: clc_load_balancer"
sidebar_current: "docs-clc-resource-load-balancer"
description: |-
  Manages a CLC load balancer.
---

# clc\_load\_balancer

Manages a CLC load balancer. Manage connected backends with [clc_load_balancer_pool](load_balancer_pool.html).

See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#shared-load-balancer).

## Example Usage

```
# Provision a load balancer
resource "clc_load_balancer" "api" {
  data_center = "${clc_group.frontends.location_id}"
  name = "api"
  description = "api load balancer"
  status = "enabled"
}

output "api_ip" {
  value = "${clc_load_balancer.api.ip_address}"
}
```

## Argument Reference

The following arguments are supported:

* `name` - (Required, string) The name of the load balancer.
* `data_center` - (Required, string) The datacenter location of the load balancer.
* `status` - (Required, string) Either "enabled" or "disabled".
* `description` - (Optional, string) Description for the load balancer (visible in the control portal only).

@ -0,0 +1,70 @@
---
layout: "clc"
page_title: "clc: clc_load_balancer_pool"
sidebar_current: "docs-clc-resource-load-balancer-pool"
description: |-
  Manages a CLC load balancer pool.
---

# clc\_load\_balancer\_pool

Manages a CLC load balancer pool. Manage the related frontend with [clc_load_balancer](load_balancer.html).

See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#shared-load-balancer).

## Example Usage

```
# Provision a load balancer pool
resource "clc_load_balancer_pool" "pool" {
  data_center = "${clc_group.frontends.location_id}"
  load_balancer = "${clc_load_balancer.api.id}"
  method = "roundRobin"
  persistence = "standard"
  port = 80
  nodes
  {
    status = "enabled"
    ipAddress = "${clc_server.node.0.private_ip_address}"
    privatePort = 3000
  }
}

output "pool" {
  value = "${join(" ", clc_load_balancer_pool.pool.nodes)}"
}
```

## Argument Reference

The following arguments are supported:

* `load_balancer` - (Required, string) The id of the load balancer.
* `data_center` - (Required, string) The datacenter location for this pool.
* `port` - (Required, int) Either 80 or 443.
* `method` - (Optional, string) The configured balancing method. Either
  "roundRobin" (default) or "leastConnection".
* `persistence` - (Optional, string) The configured persistence
  method. Either "standard" (default) or "sticky".
* `nodes` - (Optional) See [Nodes](#nodes) below for details.

<a id="nodes"></a>
## Nodes

`nodes` is a block within the configuration that may be repeated to
specify connected nodes on this pool; a sketch showing repetition
follows the list. Each `nodes` block supports the following:

* `ipAddress` - (Required, string) The destination internal ip of the pool node.
* `privatePort` - (Required, int) The destination port on the pool node.
* `status` - (Optional, string) Either "enabled" or "disabled".
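
A sketch of a pool balancing two backends, assuming a `clc_server.node`
resource provisioned with `count = 2` (the ports are illustrative):

```
resource "clc_load_balancer_pool" "pool" {
  data_center = "${clc_group.frontends.location_id}"
  load_balancer = "${clc_load_balancer.api.id}"
  port = 80

  # one nodes block per backend
  nodes
  {
    status = "enabled"
    ipAddress = "${clc_server.node.0.private_ip_address}"
    privatePort = 3000
  }
  nodes
  {
    status = "enabled"
    ipAddress = "${clc_server.node.1.private_ip_address}"
    privatePort = 3000
  }
}
```
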
@ -0,0 +1,85 @@
---
layout: "clc"
page_title: "clc: clc_public_ip"
sidebar_current: "docs-clc-resource-public-ip"
description: |-
  Manages a CLC public ip.
---

# clc\_public\_ip

Manages a CLC public ip (for an existing server).

See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#public-ip).

## Example Usage

```
# Provision a public ip
resource "clc_public_ip" "backdoor" {
  server_id = "${clc_server.node.0.id}"
  internal_ip_address = "${clc_server.node.0.private_ip_address}"
  ports
  {
    protocol = "ICMP"
    port = -1
  }
  ports
  {
    protocol = "TCP"
    port = 22
  }
  ports
  {
    protocol = "TCP"
    port = 2000
    port_to = 9000
  }
  source_restrictions
  { cidr = "85.39.22.15/30" }
}

output "ip" {
  value = "${clc_public_ip.backdoor.id}"
}
```

## Argument Reference

The following arguments are supported:

* `server_id` - (Required, string) The name or ID of the server to bind the IP to.
* `internal_ip_address` - (Optional, string) The internal IP of the
  NIC to attach to. If not provided, a new internal NIC will be
  provisioned and used.
* `ports` - (Optional) See [Ports](#ports) below for details.
* `source_restrictions` - (Optional) See
  [SourceRestrictions](#source_restrictions) below for details.

<a id="ports"></a>
## Ports

`ports` is a block within the configuration that may be
repeated to specify open ports on the target IP. Each
`ports` block supports the following:

* `protocol` - (Required, string) One of "tcp", "udp", "icmp".
* `port` - (Required, int) The port to open. If defining a range, demarks the starting port.
* `port_to` - (Optional, int) Given a port range, demarks the ending port.

<a id="source_restrictions"></a>
## SourceRestrictions

`source_restrictions` is a block within the configuration that may be
repeated to restrict ingress traffic on specified CIDR blocks; see the
sketch below. Each `source_restrictions` block supports the following:

* `cidr` - (Required, string) The IP or range of IPs in CIDR notation.
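
A sketch of repeating `source_restrictions` to admit two networks; the
CIDR ranges are illustrative placeholders:

```
resource "clc_public_ip" "backdoor" {
  server_id = "${clc_server.node.0.id}"
  internal_ip_address = "${clc_server.node.0.private_ip_address}"
  ports
  {
    protocol = "TCP"
    port = 22
  }

  # one block per admitted network; placeholder office and VPN ranges
  source_restrictions
  { cidr = "173.60.0.0/16" }
  source_restrictions
  { cidr = "203.0.113.0/24" }
}
```
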
@ -0,0 +1,106 @@
---
layout: "clc"
page_title: "clc: clc_server"
sidebar_current: "docs-clc-resource-server"
description: |-
  Manages the lifecycle of a CLC server.
---

# clc\_server

Manages a CLC server.

See also [Complete API documentation](https://www.ctl.io/api-docs/v2/#servers-create-server).

## Example Usage

```
# Provision a server
resource "clc_server" "node" {
  name_template = "trusty"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.frontends.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"
  additional_disks
  {
    path = "/var"
    size_gb = 100
    type = "partitioned"
  }
  additional_disks
  {
    size_gb = 10
    type = "raw"
  }
}

output "server_id" {
  value = "${clc_server.node.id}"
}
```

## Argument Reference

The following arguments are supported:

* `name_template` - (Required, string) The base name of the server. A unique name will be generated by the platform.
* `source_server_id` - (Required, string) The name or ID of the base OS image.
  Examples: "ubuntu-14-64-template", "rhel-7-64-template", "win2012r2dtc-64"
* `group_id` - (Required, string) The name or ID of the server group to spawn the server into.
* `cpu` - (Required, int) The number of virtual cores.
* `memory_mb` - (Required, int) Provisioned RAM, in megabytes.
* `type` - (Required, string) The virtualization type.
  One of "standard", "hyperscale", "bareMetal"
* `password` - (Optional, string) The root/administrator password. Will be generated by the platform if not provided.
* `description` - (Optional, string) Description for the server (visible in the control portal only).
* `power_state` - (Optional, string) See [PowerStates](#power_states) below for details.
  If absent, defaults to `started`.
* `private_ip_address` - (Optional, string) Set the internal IP address. If absent, allocated and assigned from the pool.
* `network_id` - (Optional, string) GUID of the network to use. (Must be set up in advance from the control portal.)
  When absent, the default network will be used.
* `storage_type` - (Optional, string) Backup and replication strategy for disks.
  One of "standard", "premium"
* `additional_disks` - (Optional) See [Disks](#disks) below for details.
* `custom_fields` - (Optional) See [CustomFields](#custom_fields) below for details.

<a id="power_states"></a>
## PowerStates

`power_state` may be used to set the initial power state or to modify an
existing instance; a sketch follows the list.

* `on` | `started` - machine powered on
* `off` | `stopped` - machine powered off forcefully
* `paused` - freeze machine: memory, processes, billing, monitoring
* `shutdown` - shut down gracefully
* `reboot` - restart gracefully
* `reset` - restart forcefully
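
A sketch forcing a server into the stopped state; the other arguments
mirror the Example Usage above:

```
resource "clc_server" "node" {
  name_template = "trusty"
  source_server_id = "UBUNTU-14-64-TEMPLATE"
  group_id = "${clc_group.frontends.id}"
  cpu = 2
  memory_mb = 2048
  password = "Green123$"

  # powered off forcefully; use "shutdown" for a graceful stop
  power_state = "stopped"
}
```
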
<a id="disks"></a>
## Disks

`additional_disks` is a block within the configuration that may be
repeated to specify the attached disks on a server. Each
`additional_disks` block supports the following:

* `type` - (Required, string) Either "raw" or "partitioned".
* `size_gb` - (Required, int) Size of the allocated disk, in GB.
* `path` - (Required for type `partitioned`, string) The mountpoint for the disk.

<a id="custom_fields"></a>
## CustomFields

`custom_fields` is a block within the configuration that may be
repeated to bind custom fields for a server. Custom fields need to be set
up in advance. Each `custom_fields` block supports the following:

* `id` - (Required, string) The ID of the custom field to set.
* `value` - (Required, string) The value for the specified field.

@ -0,0 +1,38 @@
<% wrap_layout :inner do %>
  <% content_for :sidebar do %>
    <div class="docs-sidebar hidden-print affix-top" role="complementary">
      <ul class="nav docs-sidenav">
        <li<%= sidebar_current("docs-home") %>>
          <a href="/docs/providers/index.html">« Documentation Home</a>
        </li>

        <li<%= sidebar_current("docs-clc-index") %>>
          <a href="/docs/providers/clc/index.html">CenturyLinkCloud Provider</a>
        </li>

        <li<%= sidebar_current(/^docs-clc-resource/) %>>
          <a href="#">Resources</a>
          <ul class="nav nav-visible">
            <li<%= sidebar_current("docs-clc-resource-server") %>>
              <a href="/docs/providers/clc/r/server.html">clc_server</a>
            </li>
            <li<%= sidebar_current("docs-clc-resource-group") %>>
              <a href="/docs/providers/clc/r/group.html">clc_group</a>
            </li>
            <li<%= sidebar_current("docs-clc-resource-public-ip") %>>
              <a href="/docs/providers/clc/r/public_ip.html">clc_public_ip</a>
            </li>
            <li<%= sidebar_current("docs-clc-resource-load-balancer") %>>
              <a href="/docs/providers/clc/r/load_balancer.html">clc_load_balancer</a>
            </li>
            <li<%= sidebar_current("docs-clc-resource-load-balancer-pool") %>>
              <a href="/docs/providers/clc/r/load_balancer_pool.html">clc_load_balancer_pool</a>
            </li>
          </ul>
        </li>
      </ul>
    </div>
  <% end %>

  <%= yield %>
<% end %>

@ -149,6 +149,10 @@
  <a href="/docs/providers/chef/index.html">Chef</a>
</li>

<li<%= sidebar_current("docs-providers-clc") %>>
  <a href="/docs/providers/clc/index.html">CenturyLinkCloud</a>
</li>

<li<%= sidebar_current("docs-providers-cloudflare") %>>
  <a href="/docs/providers/cloudflare/index.html">CloudFlare</a>
</li>