provider/fastly: Add S3 Log Streaming to Fastly Service (#6378)

* provider/fastly: Add S3 Log Streaming to Fastly Service

Adds support for streaming logs to an S3 bucket to the Fastly Service V1 resource

* provider/fastly: Bump go-fastly version for domain support in S3 Logging
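
A minimal sketch of how the new block is used in a `fastly_service_v1` configuration (the service name, domain, bucket, and credentials below are placeholder values):

resource "fastly_service_v1" "example" {
  name = "example-service"

  domain {
    name    = "example.notadomain.com"
    comment = "example domain"
  }

  backend {
    address = "aws.amazon.com"
    name    = "amazon docs"
  }

  # Stream request logs to the named S3 bucket
  s3logging {
    name          = "examplebucketlog"
    bucket_name   = "example-fastly-logs"
    domain        = "s3-us-west-2.amazonaws.com"
    s3_access_key = "somekey"
    s3_secret_key = "somesecret"
    path          = "/logs/"
    period        = 3600
    gzip_level    = 0
  }

  force_destroy = true
}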
Clint 2016-04-28 10:36:25 -05:00
parent 77c3ff9127
commit a4407d9af7
5 changed files with 511 additions and 5 deletions

Godeps/Godeps.json generated

@ -1181,7 +1181,7 @@
},
{
"ImportPath": "github.com/sethvargo/go-fastly",
"Rev": "058f71c351c7fd4b7cf7e4604e827f9705a35ce0"
"Rev": "6566b161e807516f4a45bc3054eac291a120e217"
},
{
"ImportPath": "github.com/soniah/dnsmadeeasy",


@ -296,6 +296,73 @@ func resourceServiceV1() *schema.Resource {
},
},
},
"s3logging": &schema.Schema{
Type: schema.TypeSet,
Optional: true,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
// Required fields
"name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "Unique name to refer to this logging setup",
},
"bucket_name": &schema.Schema{
Type: schema.TypeString,
Required: true,
Description: "S3 Bucket name to store logs in",
},
"s3_access_key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("FASTLY_S3_ACCESS_KEY", ""),
Description: "AWS Access Key",
},
"s3_secret_key": &schema.Schema{
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("FASTLY_S3_SECRET_KEY", ""),
Description: "AWS Secret Key",
},
// Optional fields
"path": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Path to store the files. Must end with a trailing slash",
},
"domain": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Description: "Bucket endpoint",
},
"gzip_level": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 0,
Description: "Gzip Compression level",
},
"period": &schema.Schema{
Type: schema.TypeInt,
Optional: true,
Default: 3600,
Description: "How frequently the logs should be transferred, in seconds (Default 3600)",
},
"format": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "%h %l %u %t %r %>s",
Description: "Apache-style string or VCL variables to use for log formatting",
},
"timestamp_format": &schema.Schema{
Type: schema.TypeString,
Optional: true,
Default: "%Y-%m-%dT%H:%M:%S.000",
Description: "specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`)",
},
},
},
},
},
}
}
@ -341,6 +408,7 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error {
"default_ttl",
"header",
"gzip",
"s3logging",
} {
if d.HasChange(v) {
needsChange = true
@ -644,6 +712,78 @@ func resourceServiceV1Update(d *schema.ResourceData, meta interface{}) error {
}
}
// find difference in s3logging
if d.HasChange("s3logging") {
// POST new Logging
// Note: we don't utilize the PUT endpoint to update an S3 logging endpoint;
// we simply destroy it and create a new one. This is how Terraform works with
// nested sub-resources: we only get the full diff, not a partial set-item diff.
// Because this is done on a new version of the configuration, this is
// considered safe
os, ns := d.GetChange("s3logging")
if os == nil {
os = new(schema.Set)
}
if ns == nil {
ns = new(schema.Set)
}
oss := os.(*schema.Set)
nss := ns.(*schema.Set)
removeS3Logging := oss.Difference(nss).List()
addS3Logging := nss.Difference(oss).List()
// DELETE old S3 Log configurations
for _, sRaw := range removeS3Logging {
sf := sRaw.(map[string]interface{})
opts := gofastly.DeleteS3Input{
Service: d.Id(),
Version: latestVersion,
Name: sf["name"].(string),
}
log.Printf("[DEBUG] Fastly S3 Logging Removal opts: %#v", opts)
err := conn.DeleteS3(&opts)
if err != nil {
return err
}
}
// POST new/updated S3 Logging
for _, sRaw := range addS3Logging {
sf := sRaw.(map[string]interface{})
// Fastly API will not error if these are omitted, so we throw an error
// if any of these are empty
for _, sk := range []string{"s3_access_key", "s3_secret_key"} {
if sf[sk].(string) == "" {
return fmt.Errorf("[ERR] No %s found for S3 Log stream setup for Service (%s)", sk, d.Id())
}
}
opts := gofastly.CreateS3Input{
Service: d.Id(),
Version: latestVersion,
Name: sf["name"].(string),
BucketName: sf["bucket_name"].(string),
AccessKey: sf["s3_access_key"].(string),
SecretKey: sf["s3_secret_key"].(string),
Period: uint(sf["period"].(int)),
GzipLevel: uint(sf["gzip_level"].(int)),
Domain: sf["domain"].(string),
Path: sf["path"].(string),
Format: sf["format"].(string),
TimestampFormat: sf["timestamp_format"].(string),
}
log.Printf("[DEBUG] Create S3 Logging Opts: %#v", opts)
_, err := conn.CreateS3(&opts)
if err != nil {
return err
}
}
}
// validate version
log.Printf("[DEBUG] Validating Fastly Service (%s), Version (%s)", d.Id(), latestVersion)
valid, msg, err := conn.ValidateVersion(&gofastly.ValidateVersionInput{
@ -790,6 +930,23 @@ func resourceServiceV1Read(d *schema.ResourceData, meta interface{}) error {
log.Printf("[WARN] Error setting Gzips for (%s): %s", d.Id(), err)
}
// refresh S3 Logging
log.Printf("[DEBUG] Refreshing S3 Logging for (%s)", d.Id())
s3List, err := conn.ListS3s(&gofastly.ListS3sInput{
Service: d.Id(),
Version: s.ActiveVersion.Number,
})
if err != nil {
return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%s): %s", d.Id(), s.ActiveVersion.Number, err)
}
sl := flattenS3s(s3List)
if err := d.Set("s3logging", sl); err != nil {
log.Printf("[WARN] Error setting S3 Logging for (%s): %s", d.Id(), err)
}
} else {
log.Printf("[DEBUG] Active Version for Service (%s) is empty, no state to refresh", d.Id())
}
@ -1028,3 +1185,33 @@ func flattenGzips(gzipsList []*gofastly.Gzip) []map[string]interface{} {
return gl
}
func flattenS3s(s3List []*gofastly.S3) []map[string]interface{} {
var sl []map[string]interface{}
for _, s := range s3List {
// Convert S3s to a map for saving to state.
ns := map[string]interface{}{
"name": s.Name,
"bucket_name": s.BucketName,
"s3_access_key": s.AccessKey,
"s3_secret_key": s.SecretKey,
"path": s.Path,
"period": s.Period,
"domain": s.Domain,
"gzip_level": s.GzipLevel,
"format": s.Format,
"timestamp_format": s.TimestampFormat,
}
// prune any empty values that come from the default string value in structs
for k, v := range ns {
if v == "" {
delete(ns, k)
}
}
sl = append(sl, ns)
}
return sl
}


@ -0,0 +1,287 @@
package fastly
import (
"fmt"
"os"
"reflect"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
gofastly "github.com/sethvargo/go-fastly"
)
func TestAccFastlyServiceV1_s3logging_basic(t *testing.T) {
var service gofastly.ServiceDetail
name := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10))
log1 := gofastly.S3{
Version: "1",
Name: "somebucketlog",
BucketName: "fastlytestlogging",
Domain: "s3-us-west-2.amazonaws.com",
AccessKey: "somekey",
SecretKey: "somesecret",
Period: uint(3600),
GzipLevel: uint(0),
Format: "%h %l %u %t %r %>s",
TimestampFormat: "%Y-%m-%dT%H:%M:%S.000",
}
log2 := gofastly.S3{
Version: "1",
Name: "someotherbucketlog",
BucketName: "fastlytestlogging2",
Domain: "s3-us-west-2.amazonaws.com",
AccessKey: "someotherkey",
SecretKey: "someothersecret",
GzipLevel: uint(3),
Period: uint(60),
Format: "%h %l %u %t %r %>s",
TimestampFormat: "%Y-%m-%dT%H:%M:%S.000",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckServiceV1Destroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccServiceV1S3LoggingConfig(name, domainName1),
Check: resource.ComposeTestCheckFunc(
testAccCheckServiceV1Exists("fastly_service_v1.foo", &service),
testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log1}),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "name", name),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "s3logging.#", "1"),
),
},
resource.TestStep{
Config: testAccServiceV1S3LoggingConfig_update(name, domainName1),
Check: resource.ComposeTestCheckFunc(
testAccCheckServiceV1Exists("fastly_service_v1.foo", &service),
testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log1, &log2}),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "name", name),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "s3logging.#", "2"),
),
},
},
})
}
// Tests that s3_access_key and s3_secret_key are read from the env
func TestAccFastlyServiceV1_s3logging_s3_env(t *testing.T) {
var service gofastly.ServiceDetail
name := fmt.Sprintf("tf-test-%s", acctest.RandString(10))
domainName1 := fmt.Sprintf("%s.notadomain.com", acctest.RandString(10))
// set the env vars to a value we expect
resetEnv := setEnv("someEnv", t)
defer resetEnv()
log3 := gofastly.S3{
Version: "1",
Name: "somebucketlog",
BucketName: "fastlytestlogging",
Domain: "s3-us-west-2.amazonaws.com",
AccessKey: "someEnv",
SecretKey: "someEnv",
Period: uint(3600),
GzipLevel: uint(0),
Format: "%h %l %u %t %r %>s",
TimestampFormat: "%Y-%m-%dT%H:%M:%S.000",
}
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckServiceV1Destroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccServiceV1S3LoggingConfig_env(name, domainName1),
Check: resource.ComposeTestCheckFunc(
testAccCheckServiceV1Exists("fastly_service_v1.foo", &service),
testAccCheckFastlyServiceV1S3LoggingAttributes(&service, []*gofastly.S3{&log3}),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "name", name),
resource.TestCheckResourceAttr(
"fastly_service_v1.foo", "s3logging.#", "1"),
),
},
},
})
}
func testAccCheckFastlyServiceV1S3LoggingAttributes(service *gofastly.ServiceDetail, s3s []*gofastly.S3) resource.TestCheckFunc {
return func(s *terraform.State) error {
conn := testAccProvider.Meta().(*FastlyClient).conn
s3List, err := conn.ListS3s(&gofastly.ListS3sInput{
Service: service.ID,
Version: service.ActiveVersion.Number,
})
if err != nil {
return fmt.Errorf("[ERR] Error looking up S3 Logging for (%s), version (%s): %s", service.Name, service.ActiveVersion.Number, err)
}
if len(s3List) != len(s3s) {
return fmt.Errorf("S3 List count mismatch, expected (%d), got (%d)", len(s3s), len(s3List))
}
var found int
for _, s := range s3s {
for _, ls := range s3List {
if s.Name == ls.Name {
// we don't know these things ahead of time, so populate them now
s.ServiceID = service.ID
s.Version = service.ActiveVersion.Number
// We don't track these, so clear them out because we also won't know
// these ahead of time
ls.CreatedAt = nil
ls.UpdatedAt = nil
if !reflect.DeepEqual(s, ls) {
return fmt.Errorf("Bad match S3 logging match, expected (%#v), got (%#v)", s, ls)
}
found++
}
}
}
if found != len(s3s) {
return fmt.Errorf("Error matching S3 Logging rules")
}
return nil
}
}
func testAccServiceV1S3LoggingConfig(name, domain string) string {
return fmt.Sprintf(`
resource "fastly_service_v1" "foo" {
name = "%s"
domain {
name = "%s"
comment = "tf-testing-domain"
}
backend {
address = "aws.amazon.com"
name = "amazon docs"
}
s3logging {
name = "somebucketlog"
bucket_name = "fastlytestlogging"
domain = "s3-us-west-2.amazonaws.com"
s3_access_key = "somekey"
s3_secret_key = "somesecret"
}
force_destroy = true
}`, name, domain)
}
func testAccServiceV1S3LoggingConfig_update(name, domain string) string {
return fmt.Sprintf(`
resource "fastly_service_v1" "foo" {
name = "%s"
domain {
name = "%s"
comment = "tf-testing-domain"
}
backend {
address = "aws.amazon.com"
name = "amazon docs"
}
s3logging {
name = "somebucketlog"
bucket_name = "fastlytestlogging"
domain = "s3-us-west-2.amazonaws.com"
s3_access_key = "somekey"
s3_secret_key = "somesecret"
}
s3logging {
name = "someotherbucketlog"
bucket_name = "fastlytestlogging2"
domain = "s3-us-west-2.amazonaws.com"
s3_access_key = "someotherkey"
s3_secret_key = "someothersecret"
period = 60
gzip_level = 3
}
force_destroy = true
}`, name, domain)
}
func testAccServiceV1S3LoggingConfig_env(name, domain string) string {
return fmt.Sprintf(`
resource "fastly_service_v1" "foo" {
name = "%s"
domain {
name = "%s"
comment = "tf-testing-domain"
}
backend {
address = "aws.amazon.com"
name = "amazon docs"
}
s3logging {
name = "somebucketlog"
bucket_name = "fastlytestlogging"
domain = "s3-us-west-2.amazonaws.com"
}
force_destroy = true
}`, name, domain)
}
func setEnv(s string, t *testing.T) func() {
e := getEnv()
// Set all the envs to a dummy value
if err := os.Setenv("FASTLY_S3_ACCESS_KEY", s); err != nil {
t.Fatalf("Error setting env var AWS_ACCESS_KEY_ID: %s", err)
}
if err := os.Setenv("FASTLY_S3_SECRET_KEY", s); err != nil {
t.Fatalf("Error setting env var FASTLY_S3_SECRET_KEY: %s", err)
}
return func() {
// restore the envs we overrode above
if err := os.Setenv("FASTLY_S3_ACCESS_KEY", e.Key); err != nil {
t.Fatalf("Error resetting env var FASTLY_S3_ACCESS_KEY: %s", err)
}
if err := os.Setenv("FASTLY_S3_SECRET_KEY", e.Secret); err != nil {
t.Fatalf("Error resetting env var FASTLY_S3_SECRET_KEY: %s", err)
}
}
}
// struct to preserve the current environment
type currentEnv struct {
Key, Secret string
}
func getEnv() *currentEnv {
// Grab any existing Fastly S3 keys and preserve them, in the off chance
// they're actually set in the environment
return &currentEnv{
Key: os.Getenv("FASTLY_S3_ACCESS_KEY"),
Secret: os.Getenv("FASTLY_S3_SECRET_KEY"),
}
}


@ -13,6 +13,7 @@ type S3 struct {
Name string `mapstructure:"name"`
BucketName string `mapstructure:"bucket_name"`
Domain string `mapstructure:"domain"`
AccessKey string `mapstructure:"access_key"`
SecretKey string `mapstructure:"secret_key"`
Path string `mapstructure:"path"`
@ -78,6 +79,7 @@ type CreateS3Input struct {
Name string `form:"name,omitempty"`
BucketName string `form:"bucket_name,omitempty"`
Domain string `form:"domain,omitempty"`
AccessKey string `form:"access_key,omitempty"`
SecretKey string `form:"secret_key,omitempty"`
Path string `form:"path,omitempty"`
@ -161,6 +163,7 @@ type UpdateS3Input struct {
NewName string `form:"name,omitempty"`
BucketName string `form:"bucket_name,omitempty"`
Domain string `form:"domain,omitempty"`
AccessKey string `form:"access_key,omitempty"`
SecretKey string `form:"secret_key,omitempty"`
Path string `form:"path,omitempty"`


@ -97,17 +97,19 @@ The following arguments are supported:
* `name` - (Required) The unique name for the Service to create
* `domain` - (Required) A set of Domain names to serve as entry points for your
Service. Defined below.
Service. Defined below
* `backend` - (Required) A set of Backends to service requests from your Domains.
Defined below.
Defined below
* `gzip` - (Required) A set of gzip rules to control automatic gzipping of
content. Defined below.
content. Defined below
* `header` - (Optional) A set of Headers to manipulate for each request. Defined
below.
below
* `default_host` - (Optional) The default hostname
* `default_ttl` - (Optional) The default Time-to-live (TTL) for requests
* `force_destroy` - (Optional) Services that are active cannot be destroyed. In
order to destroy the Service, set `force_destroy` to `true`. Default `false`.
* `s3logging` - (Optional) A set of S3 Buckets to send streaming logs to.
Defined below
The `domain` block supports:
@ -161,6 +163,32 @@ by the Action
* `substitution` - (Optional) Value to substitute in place of regular expression. (Only applies to `regex` and `regex_repeat`.)
* `priority` - (Optional) Lower priorities execute first. (Default: `100`.)
The `s3logging` block supports:
* `name` - (Required) A unique name to identify this S3 Logging Bucket
* `bucket_name` - (Required) The name of the S3 Bucket in which to store the logs
* `s3_access_key` - (Required) AWS Access Key of an account with the required
permissions to post logs. It is **strongly** recommended you create a separate
IAM user with permissions to only operate on this Bucket. This key will not
be encrypted. You can provide this key via an environment variable, `FASTLY_S3_ACCESS_KEY`
* `s3_secret_key` - (Required) AWS Secret Key of an account with the required
permissions to post logs. It is **strongly** recommended you create a separate
IAM user with permissions to only operate on this Bucket. This secret will not
be encrypted. You can provide this secret via an environment variable, `FASTLY_S3_SECRET_KEY`
* `path` - (Optional) Path to store the files. Must end with a trailing slash.
If this field is left empty, the files will be saved in the bucket's root path.
* `domain` - (Optional) If you created the S3 bucket outside of `us-east-1`,
then specify the corresponding bucket endpoint. Ex: `s3-us-west-2.amazonaws.com`
* `period` - (Optional) How frequently the logs should be transferred, in
seconds. Default `3600`
* `gzip_level` - (Optional) Level of GZIP compression, from `0-9`. `0` is no
compression. `1` is fastest and least compressed, `9` is slowest and most
compressed. Default `0`
* `format` - (Optional) Apache-style string or VCL variables to use for log formatting. Default
Apache Common Log format (`%h %l %u %t %r %>s`)
* `timestamp_format` - (Optional) `strftime` specified timestamp formatting (default `%Y-%m-%dT%H:%M:%S.000`).
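For example, a block that omits the keys and instead relies on the `FASTLY_S3_ACCESS_KEY` and `FASTLY_S3_SECRET_KEY` environment variables might look like the following sketch (the bucket, domain, and path values are illustrative):

s3logging {
  # credentials are read from FASTLY_S3_ACCESS_KEY / FASTLY_S3_SECRET_KEY
  name        = "examplebucketlog"
  bucket_name = "example-fastly-logs"
  domain      = "s3-us-west-2.amazonaws.com"
  path        = "/logs/"
  period      = 60
  gzip_level  = 9
}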
## Attributes Reference
The following attributes are exported:
@ -171,6 +199,7 @@ The following attributes are exported:
* `domain` - Set of Domains. See above for details
* `backend` - Set of Backends. See above for details
* `header` - Set of Headers. See above for details
* `s3logging` - Set of S3 Logging configurations. See above for details
* `default_host` - Default host specified
* `default_ttl` - Default TTL
* `force_destroy` - Force the destruction of the Service on delete