Merge branch 'paultyng-pt/newrelic'

stack72 2016-12-15 19:17:52 +00:00
commit 950e24bc04
No known key found for this signature in database
GPG Key ID: 8619A619B085CB16
122 changed files with 24786 additions and 0 deletions

@@ -0,0 +1,29 @@
package newrelic
import (
"log"
"github.com/hashicorp/terraform/helper/logging"
newrelic "github.com/paultyng/go-newrelic/api"
)
// Config contains New Relic provider settings
type Config struct {
APIKey string
APIURL string
}
// Client returns a new client for accessing New Relic
func (c *Config) Client() (*newrelic.Client, error) {
nrConfig := newrelic.Config{
APIKey: c.APIKey,
Debug: logging.IsDebugOrHigher(),
BaseURL: c.APIURL,
}
client := newrelic.New(nrConfig)
log.Printf("[INFO] New Relic client configured")
return &client, nil
}

@@ -0,0 +1,65 @@
package newrelic
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
newrelic "github.com/paultyng/go-newrelic/api"
)
func dataSourceNewRelicApplication() *schema.Resource {
return &schema.Resource{
Read: dataSourceNewRelicApplicationRead,
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
},
"instance_ids": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeInt},
Computed: true,
},
"host_ids": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeInt},
Computed: true,
},
},
}
}
func dataSourceNewRelicApplicationRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
log.Printf("[INFO] Reading New Relic applications")
applications, err := client.ListApplications()
if err != nil {
return err
}
var application *newrelic.Application
name := d.Get("name").(string)
for _, a := range applications {
if a.Name == name {
application = &a
break
}
}
if application == nil {
return fmt.Errorf("The name '%s' does not match any New Relic applications.", name)
}
d.SetId(strconv.Itoa(application.ID))
d.Set("name", application.Name)
d.Set("instance_ids", application.Links.InstanceIDs)
d.Set("host_ids", application.Links.HostIDs)
return nil
}

@@ -0,0 +1,50 @@
package newrelic
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
)
func TestAccNewRelicApplication_Basic(t *testing.T) {
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccNewRelicApplicationConfig(),
Check: resource.ComposeTestCheckFunc(
testAccNewRelicApplication("data.newrelic_application.app"),
),
},
},
})
}
func testAccNewRelicApplication(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
r := s.RootModule().Resources[n]
a := r.Primary.Attributes
if a["id"] == "" {
return fmt.Errorf("Expected to get an application from New Relic")
}
if a["name"] != testAccExpectedApplicationName {
return fmt.Errorf("Expected the application name to be: %s, but got: %s", testAccExpectedApplicationName, a["name"])
}
return nil
}
}
// The test application for this data source is created in provider_test.go
func testAccNewRelicApplicationConfig() string {
return fmt.Sprintf(`
data "newrelic_application" "app" {
name = "%s"
}
`, testAccExpectedApplicationName)
}

@@ -0,0 +1,37 @@
package newrelic
import (
"fmt"
"strconv"
"strings"
)
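// parseIDs splits a serialized ID such as "123:456" into its integer parts.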
func parseIDs(serializedID string, count int) ([]int, error) {
rawIDs := strings.SplitN(serializedID, ":", count)
if len(rawIDs) != count {
return []int{}, fmt.Errorf("Unable to parse ID %v", serializedID)
}
ids := make([]int, count)
for i, rawID := range rawIDs {
id, err := strconv.ParseInt(rawID, 10, 32)
if err != nil {
return ids, err
}
ids[i] = int(id)
}
return ids, nil
}
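// serializeIDs joins integer IDs into a single colon-delimited string.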
func serializeIDs(ids []int) string {
idStrings := make([]string, len(ids))
for i, id := range ids {
idStrings[i] = strconv.Itoa(id)
}
return strings.Join(idStrings, ":")
}

@@ -0,0 +1,26 @@
package newrelic
import "testing"
func TestParseIDs_Basic(t *testing.T) {
ids, err := parseIDs("1:2", 2)
if err != nil {
t.Fatal(err)
}
if len(ids) != 2 {
t.Fatal(len(ids))
}
if ids[0] != 1 || ids[1] != 2 {
t.Fatal(ids)
}
}
func TestSerializeIDs_Basic(t *testing.T) {
id := serializeIDs([]int{1, 2})
if id != "1:2" {
t.Fatal(id)
}
}

@@ -0,0 +1,29 @@
package newrelic
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccNewRelicAlertChannel_import(t *testing.T) {
resourceName := "newrelic_alert_channel.foo"
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertChannelDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertChannelConfig(rName),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

@@ -0,0 +1,30 @@
package newrelic
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccNewRelicAlertCondition_import(t *testing.T) {
resourceName := "newrelic_alert_condition.foo"
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertConditionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertConditionConfig(rName),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

@@ -0,0 +1,30 @@
package newrelic
import (
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
)
func TestAccNewRelicAlertPolicy_import(t *testing.T) {
resourceName := "newrelic_alert_policy.foo"
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertPolicyConfig(rName),
},
resource.TestStep{
ResourceName: resourceName,
ImportState: true,
ImportStateVerify: true,
},
},
})
}

@@ -0,0 +1,49 @@
package newrelic
import (
"log"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
)
// Provider represents a resource provider in Terraform
func Provider() terraform.ResourceProvider {
return &schema.Provider{
Schema: map[string]*schema.Schema{
"api_key": {
Type: schema.TypeString,
Required: true,
DefaultFunc: schema.EnvDefaultFunc("NEWRELIC_API_KEY", nil),
Sensitive: true,
},
"api_url": {
Type: schema.TypeString,
Optional: true,
DefaultFunc: schema.EnvDefaultFunc("NEWRELIC_API_URL", "https://api.newrelic.com/v2"),
},
},
DataSourcesMap: map[string]*schema.Resource{
"newrelic_application": dataSourceNewRelicApplication(),
},
ResourcesMap: map[string]*schema.Resource{
"newrelic_alert_channel": resourceNewRelicAlertChannel(),
"newrelic_alert_condition": resourceNewRelicAlertCondition(),
"newrelic_alert_policy": resourceNewRelicAlertPolicy(),
"newrelic_alert_policy_channel": resourceNewRelicAlertPolicyChannel(),
},
ConfigureFunc: providerConfigure,
}
}
func providerConfigure(data *schema.ResourceData) (interface{}, error) {
config := Config{
APIKey: data.Get("api_key").(string),
APIURL: data.Get("api_url").(string),
}
log.Println("[INFO] Initializing New Relic client")
return config.Client()
}
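For context, a minimal sketch of how a provider like this is conventionally exposed as a standalone plugin binary in Terraform of this era; the `builtin/bins/provider-newrelic/main.go` path is an assumption, not part of the excerpt above:

```go
// Hypothetical plugin entry point (assumed path: builtin/bins/provider-newrelic/main.go).
// plugin.ServeOpts.ProviderFunc accepts any func() terraform.ResourceProvider,
// which the Provider function defined above satisfies directly.
package main

import (
	"github.com/hashicorp/terraform/builtin/providers/newrelic"
	"github.com/hashicorp/terraform/plugin"
)

func main() {
	plugin.Serve(&plugin.ServeOpts{
		ProviderFunc: newrelic.Provider,
	})
}
```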

@@ -0,0 +1,69 @@
package newrelic
import (
"fmt"
"os"
"testing"
"time"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/terraform"
newrelic "github.com/newrelic/go-agent"
)
var (
testAccExpectedApplicationName string
testAccProviders map[string]terraform.ResourceProvider
testAccProvider *schema.Provider
)
func init() {
testAccExpectedApplicationName = fmt.Sprintf("tf_test_%s", acctest.RandString(10))
testAccProvider = Provider().(*schema.Provider)
testAccProviders = map[string]terraform.ResourceProvider{
"newrelic": testAccProvider,
}
}
func TestProvider(t *testing.T) {
if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
t.Fatalf("err: %s", err)
}
}
func TestProviderImpl(t *testing.T) {
var _ terraform.ResourceProvider = Provider()
}
func testAccPreCheck(t *testing.T) {
if v := os.Getenv("NEWRELIC_API_KEY"); v == "" {
t.Log(v)
t.Fatal("NEWRELIC_API_KEY must be set for acceptance tests")
}
// setup fake application by logging some metrics
if v := os.Getenv("NEWRELIC_LICENSE_KEY"); len(v) > 0 {
config := newrelic.NewConfig(testAccExpectedApplicationName, v)
app, err := newrelic.NewApplication(config)
if err != nil {
t.Log(err)
t.Fatal("Error setting up New Relic application")
}
if err := app.WaitForConnection(30 * time.Second); err != nil {
t.Log(err)
t.Fatal("Unable to setup New Relic application connection")
}
if err := app.RecordCustomEvent("terraform test", nil); err != nil {
t.Log(err)
t.Fatal("Unable to record custom event in New Relic")
}
app.Shutdown(30 * time.Second)
} else {
t.Log(v)
t.Fatal("NEWRELIC_LICENSE_KEY must be set for acceptance tests")
}
}

@@ -0,0 +1,168 @@
package newrelic
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
newrelic "github.com/paultyng/go-newrelic/api"
)
var alertChannelTypes = map[string][]string{
"campfire": []string{
"room",
"subdomain",
"token",
},
"email": []string{
"include_json_attachment",
"recipients",
},
"hipchat": []string{
"auth_token",
"base_url",
"room_id",
},
"opsgenie": []string{
"api_key",
"recipients",
"tags",
"teams",
},
"pagerduty": []string{
"service_key",
},
"slack": []string{
"channel",
"url",
},
"user": []string{
"user_id",
},
"victorops": []string{
"key",
"route_key",
},
"webhook": []string{
"auth_password",
"auth_type",
"auth_username",
"base_url",
"headers",
"payload_type",
"payload",
},
}
func resourceNewRelicAlertChannel() *schema.Resource {
validAlertChannelTypes := make([]string, 0, len(alertChannelTypes))
for k := range alertChannelTypes {
validAlertChannelTypes = append(validAlertChannelTypes, k)
}
return &schema.Resource{
Create: resourceNewRelicAlertChannelCreate,
Read: resourceNewRelicAlertChannelRead,
// Update: Not currently supported in API
Delete: resourceNewRelicAlertChannelDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
ValidateFunc: validation.StringInSlice(validAlertChannelTypes, false),
},
"configuration": {
Type: schema.TypeMap,
Required: true,
ForceNew: true,
//TODO: ValidateFunc: (use list of keys from map above)
Sensitive: true,
},
},
}
}
func buildAlertChannelStruct(d *schema.ResourceData) *newrelic.AlertChannel {
channel := newrelic.AlertChannel{
Name: d.Get("name").(string),
Type: d.Get("type").(string),
Configuration: d.Get("configuration").(map[string]interface{}),
}
return &channel
}
func resourceNewRelicAlertChannelCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
channel := buildAlertChannelStruct(d)
log.Printf("[INFO] Creating New Relic alert channel %s", channel.Name)
channel, err := client.CreateAlertChannel(*channel)
if err != nil {
return err
}
d.SetId(strconv.Itoa(channel.ID))
return nil
}
func resourceNewRelicAlertChannelRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
id, err := strconv.ParseInt(d.Id(), 10, 32)
if err != nil {
return err
}
log.Printf("[INFO] Reading New Relic alert channel %v", id)
channel, err := client.GetAlertChannel(int(id))
if err != nil {
if err == newrelic.ErrNotFound {
d.SetId("")
return nil
}
return err
}
d.Set("name", channel.Name)
d.Set("type", channel.Type)
if err := d.Set("configuration", channel.Configuration); err != nil {
return fmt.Errorf("[DEBUG] Error setting Alert Channel Configuration: %#v", err)
}
return nil
}
func resourceNewRelicAlertChannelDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
id, err := strconv.ParseInt(d.Id(), 10, 32)
if err != nil {
return err
}
log.Printf("[INFO] Deleting New Relic alert channel %v", id)
if err := client.DeleteAlertChannel(int(id)); err != nil {
return err
}
d.SetId("")
return nil
}

@@ -0,0 +1,131 @@
package newrelic
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
newrelic "github.com/paultyng/go-newrelic/api"
)
func TestAccNewRelicAlertChannel_Basic(t *testing.T) {
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertChannelDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertChannelConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "type", "email"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "configuration.recipients", "foo@example.com"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "configuration.include_json_attachment", "1"),
),
},
resource.TestStep{
Config: testAccCheckNewRelicAlertChannelConfigUpdated(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertChannelExists("newrelic_alert_channel.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "type", "email"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "configuration.recipients", "bar@example.com"),
resource.TestCheckResourceAttr(
"newrelic_alert_channel.foo", "configuration.include_json_attachment", "0"),
),
},
},
})
}
func testAccCheckNewRelicAlertChannelDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*newrelic.Client)
for _, r := range s.RootModule().Resources {
if r.Type != "newrelic_alert_channel" {
continue
}
id, err := strconv.ParseInt(r.Primary.ID, 10, 32)
if err != nil {
return err
}
_, err = client.GetAlertChannel(int(id))
if err == nil {
return fmt.Errorf("Alert channel still exists")
}
}
return nil
}
func testAccCheckNewRelicAlertChannelExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No channel ID is set")
}
client := testAccProvider.Meta().(*newrelic.Client)
id, err := strconv.ParseInt(rs.Primary.ID, 10, 32)
if err != nil {
return err
}
found, err := client.GetAlertChannel(int(id))
if err != nil {
return err
}
if strconv.Itoa(found.ID) != rs.Primary.ID {
return fmt.Errorf("Channel not found: %v - %v", rs.Primary.ID, found)
}
return nil
}
}
func testAccCheckNewRelicAlertChannelConfig(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_channel" "foo" {
name = "tf-test-%s"
type = "email"
configuration = {
recipients = "foo@example.com"
include_json_attachment = "1"
}
}
`, rName)
}
func testAccCheckNewRelicAlertChannelConfigUpdated(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_channel" "foo" {
name = "tf-test-updated-%s"
type = "email"
configuration = {
recipients = "bar@example.com"
include_json_attachment = "0"
}
}
`, rName)
}

@@ -0,0 +1,342 @@
package newrelic
import (
"fmt"
"log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
newrelic "github.com/paultyng/go-newrelic/api"
)
var alertConditionTypes = map[string][]string{
"apm_app_metric": []string{
"apdex",
"error_percentage",
"response_time_background",
"response_time_web",
"throughput_background",
"throughput_web",
"user_defined",
},
"apm_kt_metric": []string{
"apdex",
"error_count",
"error_percentage",
"response_time",
"throughput",
},
"browser_metric": []string{
"ajax_response_time",
"ajax_throughput",
"dom_processing",
"end_user_apdex",
"network",
"page_rendering",
"page_view_throughput",
"page_views_with_js_errors",
"request_queuing",
"total_page_load",
"user_defined",
"web_application",
},
"mobile_metric": []string{
"database",
"images",
"json",
"mobile_crash_rate",
"network_error_percentage",
"network",
"status_error_percentage",
"user_defined",
"view_loading",
},
"servers_metric": []string{
"cpu_percentage",
"disk_io_percentage",
"fullest_disk_percentage",
"load_average_one_minute",
"memory_percentage",
"user_defined",
},
}
func resourceNewRelicAlertCondition() *schema.Resource {
validAlertConditionTypes := make([]string, 0, len(alertConditionTypes))
for k := range alertConditionTypes {
validAlertConditionTypes = append(validAlertConditionTypes, k)
}
return &schema.Resource{
Create: resourceNewRelicAlertConditionCreate,
Read: resourceNewRelicAlertConditionRead,
Update: resourceNewRelicAlertConditionUpdate,
Delete: resourceNewRelicAlertConditionDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"policy_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"name": {
Type: schema.TypeString,
Required: true,
},
"type": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice(validAlertConditionTypes, false),
},
"entities": {
Type: schema.TypeList,
Elem: &schema.Schema{Type: schema.TypeInt},
Required: true,
MinItems: 1,
},
"metric": {
Type: schema.TypeString,
Required: true,
//TODO: ValidateFunc from map
},
"runbook_url": {
Type: schema.TypeString,
Optional: true,
},
"term": {
Type: schema.TypeList,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"duration": {
Type: schema.TypeInt,
Required: true,
ValidateFunc: intInSlice([]int{5, 10, 15, 30, 60, 120}),
},
"operator": {
Type: schema.TypeString,
Optional: true,
Default: "equal",
ValidateFunc: validation.StringInSlice([]string{"above", "below", "equal"}, false),
},
"priority": {
Type: schema.TypeString,
Optional: true,
Default: "critical",
ValidateFunc: validation.StringInSlice([]string{"critical", "warning"}, false),
},
"threshold": {
Type: schema.TypeFloat,
Required: true,
ValidateFunc: float64Gte(0.0),
},
"time_function": {
Type: schema.TypeString,
Required: true,
ValidateFunc: validation.StringInSlice([]string{"all", "any"}, false),
},
},
},
Required: true,
MinItems: 1,
},
"user_defined_metric": {
Type: schema.TypeString,
Optional: true,
},
"user_defined_value_function": {
Type: schema.TypeString,
Optional: true,
ValidateFunc: validation.StringInSlice([]string{"average", "min", "max", "total", "sample_size"}, false),
},
},
}
}
func buildAlertConditionStruct(d *schema.ResourceData) *newrelic.AlertCondition {
entitySet := d.Get("entities").([]interface{})
entities := make([]string, len(entitySet))
for i, entity := range entitySet {
entities[i] = strconv.Itoa(entity.(int))
}
termSet := d.Get("term").([]interface{})
terms := make([]newrelic.AlertConditionTerm, len(termSet))
for i, termI := range termSet {
termM := termI.(map[string]interface{})
terms[i] = newrelic.AlertConditionTerm{
Duration: termM["duration"].(int),
Operator: termM["operator"].(string),
Priority: termM["priority"].(string),
Threshold: termM["threshold"].(float64),
TimeFunction: termM["time_function"].(string),
}
}
condition := newrelic.AlertCondition{
Type: d.Get("type").(string),
Name: d.Get("name").(string),
Enabled: true,
Entities: entities,
Metric: d.Get("metric").(string),
Terms: terms,
PolicyID: d.Get("policy_id").(int),
}
if attr, ok := d.GetOk("runbook_url"); ok {
condition.RunbookURL = attr.(string)
}
if attrM, ok := d.GetOk("user_defined_metric"); ok {
if attrVF, ok := d.GetOk("user_defined_value_function"); ok {
condition.UserDefined = newrelic.AlertConditionUserDefined{
Metric: attrM.(string),
ValueFunction: attrVF.(string),
}
}
}
return &condition
}
func readAlertConditionStruct(condition *newrelic.AlertCondition, d *schema.ResourceData) error {
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
entities := make([]int, len(condition.Entities))
for i, entity := range condition.Entities {
v, err := strconv.ParseInt(entity, 10, 32)
if err != nil {
return err
}
entities[i] = int(v)
}
d.Set("policy_id", policyID)
d.Set("name", condition.Name)
d.Set("type", condition.Type)
d.Set("metric", condition.Metric)
d.Set("runbook_url", condition.RunbookURL)
d.Set("user_defined_metric", condition.UserDefined.Metric)
d.Set("user_defined_value_function", condition.UserDefined.ValueFunction)
if err := d.Set("entities", entities); err != nil {
return fmt.Errorf("[DEBUG] Error setting alert condition entities: %#v", err)
}
var terms []map[string]interface{}
for _, src := range condition.Terms {
dst := map[string]interface{}{
"duration": src.Duration,
"operator": src.Operator,
"priority": src.Priority,
"threshold": src.Threshold,
"time_function": src.TimeFunction,
}
terms = append(terms, dst)
}
if err := d.Set("term", terms); err != nil {
return fmt.Errorf("[DEBUG] Error setting alert condition terms: %#v", err)
}
return nil
}
func resourceNewRelicAlertConditionCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
condition := buildAlertConditionStruct(d)
log.Printf("[INFO] Creating New Relic alert condition %s", condition.Name)
condition, err := client.CreateAlertCondition(*condition)
if err != nil {
return err
}
d.SetId(serializeIDs([]int{condition.PolicyID, condition.ID}))
return nil
}
func resourceNewRelicAlertConditionRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
log.Printf("[INFO] Reading New Relic alert condition %s", d.Id())
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
id := ids[1]
condition, err := client.GetAlertCondition(policyID, id)
if err != nil {
if err == newrelic.ErrNotFound {
d.SetId("")
return nil
}
return err
}
return readAlertConditionStruct(condition, d)
}
func resourceNewRelicAlertConditionUpdate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
condition := buildAlertConditionStruct(d)
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
id := ids[1]
condition.PolicyID = policyID
condition.ID = id
log.Printf("[INFO] Updating New Relic alert condition %d", id)
updatedCondition, err := client.UpdateAlertCondition(*condition)
if err != nil {
return err
}
return readAlertConditionStruct(updatedCondition, d)
}
func resourceNewRelicAlertConditionDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
id := ids[1]
log.Printf("[INFO] Deleting New Relic alert condition %d", id)
if err := client.DeleteAlertCondition(policyID, id); err != nil {
return err
}
d.SetId("")
return nil
}
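Because alert conditions are addressed by policy ID and condition ID together, the resource stores the composite `<policy_id>:<condition_id>` ID built with the helpers shown earlier, which is also the format `terraform import` expects via `ImportStatePassthrough`. A small illustrative example (values made up), written as a Go example function that would live in a `_test.go` file:

```go
package newrelic

import "fmt"

// Example_compositeIDs demonstrates the composite ID round trip used by
// newrelic_alert_condition; 123 and 456 are illustrative values only.
func Example_compositeIDs() {
	id := serializeIDs([]int{123, 456}) // policy 123, condition 456
	fmt.Println(id)
	ids, _ := parseIDs(id, 2)
	fmt.Println(ids[0], ids[1])
	// Output:
	// 123:456
	// 123 456
}
```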

@@ -0,0 +1,189 @@
package newrelic
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
newrelic "github.com/paultyng/go-newrelic/api"
)
func TestAccNewRelicAlertCondition_Basic(t *testing.T) {
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertConditionDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertConditionConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "type", "apm_app_metric"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "runbook_url", "https://foo.example.com"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "entities.#", "1"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "entities.0", "12345"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.#", "1"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.duration", "5"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.operator", "below"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.priority", "critical"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.threshold", "0.75"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.time_function", "all"),
),
},
resource.TestStep{
Config: testAccCheckNewRelicAlertConditionConfigUpdated(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertConditionExists("newrelic_alert_condition.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "runbook_url", "https://bar.example.com"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "entities.#", "1"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "entities.0", "67890"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.#", "1"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.duration", "10"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.operator", "below"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.priority", "critical"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.threshold", "0.65"),
resource.TestCheckResourceAttr(
"newrelic_alert_condition.foo", "term.0.time_function", "all"),
),
},
},
})
}
// TODO: func TestAccNewRelicAlertCondition_Multi(t *testing.T) {
func testAccCheckNewRelicAlertConditionDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*newrelic.Client)
for _, r := range s.RootModule().Resources {
if r.Type != "newrelic_alert_condition" {
continue
}
ids, err := parseIDs(r.Primary.ID, 2)
if err != nil {
return err
}
policyID := ids[0]
id := ids[1]
_, err = client.GetAlertCondition(policyID, id)
if err == nil {
return fmt.Errorf("Alert condition still exists")
}
}
return nil
}
func testAccCheckNewRelicAlertConditionExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No alert condition ID is set")
}
client := testAccProvider.Meta().(*newrelic.Client)
ids, err := parseIDs(rs.Primary.ID, 2)
if err != nil {
return err
}
policyID := ids[0]
id := ids[1]
found, err := client.GetAlertCondition(policyID, id)
if err != nil {
return err
}
if found.ID != id {
return fmt.Errorf("Alert condition not found: %v - %v", id, found)
}
return nil
}
}
func testAccCheckNewRelicAlertConditionConfig(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "foo" {
name = "tf-test-%[1]s"
}
resource "newrelic_alert_condition" "foo" {
policy_id = "${newrelic_alert_policy.foo.id}"
name = "tf-test-%[1]s"
type = "apm_app_metric"
entities = ["12345"]
metric = "apdex"
runbook_url = "https://foo.example.com"
term {
duration = 5
operator = "below"
priority = "critical"
threshold = "0.75"
time_function = "all"
}
}
`, rName)
}
func testAccCheckNewRelicAlertConditionConfigUpdated(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "foo" {
name = "tf-test-updated-%[1]s"
}
resource "newrelic_alert_condition" "foo" {
policy_id = "${newrelic_alert_policy.foo.id}"
name = "tf-test-updated-%[1]s"
type = "apm_app_metric"
entities = ["67890"]
metric = "apdex"
runbook_url = "https://bar.example.com"
term {
duration = 10
operator = "below"
priority = "critical"
threshold = "0.65"
time_function = "all"
}
}
`, rName)
}
// TODO: const testAccCheckNewRelicAlertConditionConfigMulti = `

@@ -0,0 +1,119 @@
package newrelic
import (
"log"
"strconv"
"github.com/hashicorp/terraform/helper/schema"
"github.com/hashicorp/terraform/helper/validation"
newrelic "github.com/paultyng/go-newrelic/api"
)
func resourceNewRelicAlertPolicy() *schema.Resource {
return &schema.Resource{
Create: resourceNewRelicAlertPolicyCreate,
Read: resourceNewRelicAlertPolicyRead,
// Update: Not currently supported in API
Delete: resourceNewRelicAlertPolicyDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"name": {
Type: schema.TypeString,
Required: true,
ForceNew: true,
},
"incident_preference": {
Type: schema.TypeString,
Optional: true,
Default: "PER_POLICY",
ForceNew: true,
ValidateFunc: validation.StringInSlice([]string{"PER_POLICY", "PER_CONDITION", "PER_CONDITION_AND_TARGET"}, false),
},
"created_at": {
Type: schema.TypeInt,
Computed: true,
},
"updated_at": {
Type: schema.TypeInt,
Computed: true,
},
},
}
}
func buildAlertPolicyStruct(d *schema.ResourceData) *newrelic.AlertPolicy {
policy := newrelic.AlertPolicy{
Name: d.Get("name").(string),
}
if attr, ok := d.GetOk("incident_preference"); ok {
policy.IncidentPreference = attr.(string)
}
return &policy
}
func resourceNewRelicAlertPolicyCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
policy := buildAlertPolicyStruct(d)
log.Printf("[INFO] Creating New Relic alert policy %s", policy.Name)
policy, err := client.CreateAlertPolicy(*policy)
if err != nil {
return err
}
d.SetId(strconv.Itoa(policy.ID))
return nil
}
func resourceNewRelicAlertPolicyRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
id, err := strconv.ParseInt(d.Id(), 10, 32)
if err != nil {
return err
}
log.Printf("[INFO] Reading New Relic alert policy %v", id)
policy, err := client.GetAlertPolicy(int(id))
if err != nil {
if err == newrelic.ErrNotFound {
d.SetId("")
return nil
}
return err
}
d.Set("name", policy.Name)
d.Set("incident_preference", policy.IncidentPreference)
d.Set("created_at", policy.CreatedAt)
d.Set("updated_at", policy.UpdatedAt)
return nil
}
func resourceNewRelicAlertPolicyDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
id, err := strconv.ParseInt(d.Id(), 10, 32)
if err != nil {
return err
}
log.Printf("[INFO] Deleting New Relic alert policy %v", id)
if err := client.DeleteAlertPolicy(int(id)); err != nil {
return err
}
d.SetId("")
return nil
}

@@ -0,0 +1,137 @@
package newrelic
import (
"log"
"github.com/hashicorp/terraform/helper/schema"
newrelic "github.com/paultyng/go-newrelic/api"
)
func policyChannelExists(client *newrelic.Client, policyID int, channelID int) (bool, error) {
channel, err := client.GetAlertChannel(channelID)
if err != nil {
if err == newrelic.ErrNotFound {
return false, nil
}
return false, err
}
for _, id := range channel.Links.PolicyIDs {
if id == policyID {
return true, nil
}
}
return false, nil
}
func resourceNewRelicAlertPolicyChannel() *schema.Resource {
return &schema.Resource{
Create: resourceNewRelicAlertPolicyChannelCreate,
Read: resourceNewRelicAlertPolicyChannelRead,
// Update: Not currently supported in API
Delete: resourceNewRelicAlertPolicyChannelDelete,
Schema: map[string]*schema.Schema{
"policy_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
"channel_id": {
Type: schema.TypeInt,
Required: true,
ForceNew: true,
},
},
}
}
func resourceNewRelicAlertPolicyChannelCreate(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
policyID := d.Get("policy_id").(int)
channelID := d.Get("channel_id").(int)
serializedID := serializeIDs([]int{policyID, channelID})
log.Printf("[INFO] Creating New Relic alert policy channel %s", serializedID)
exists, err := policyChannelExists(client, policyID, channelID)
if err != nil {
return err
}
if !exists {
err = client.UpdateAlertPolicyChannels(policyID, []int{channelID})
if err != nil {
return err
}
}
d.SetId(serializedID)
return nil
}
func resourceNewRelicAlertPolicyChannelRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
channelID := ids[1]
log.Printf("[INFO] Reading New Relic alert policy channel %s", d.Id())
exists, err := policyChannelExists(client, policyID, channelID)
if err != nil {
return err
}
if !exists {
d.SetId("")
return nil
}
d.Set("policy_id", policyID)
d.Set("channel_id", channelID)
return nil
}
func resourceNewRelicAlertPolicyChannelDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*newrelic.Client)
ids, err := parseIDs(d.Id(), 2)
if err != nil {
return err
}
policyID := ids[0]
channelID := ids[1]
log.Printf("[INFO] Deleting New Relic alert policy channel %s", d.Id())
exists, err := policyChannelExists(client, policyID, channelID)
if err != nil {
return err
}
if exists {
if err := client.DeleteAlertPolicyChannel(policyID, channelID); err != nil {
switch err {
case newrelic.ErrNotFound:
return nil
}
return err
}
}
d.SetId("")
return nil
}

@@ -0,0 +1,139 @@
package newrelic
import (
"fmt"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
newrelic "github.com/paultyng/go-newrelic/api"
)
func TestAccNewRelicAlertPolicyChannel_Basic(t *testing.T) {
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertPolicyChannelDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertPolicyChannelConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"),
),
},
resource.TestStep{
Config: testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertPolicyChannelExists("newrelic_alert_policy_channel.foo"),
),
},
},
})
}
func testAccCheckNewRelicAlertPolicyChannelDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*newrelic.Client)
for _, r := range s.RootModule().Resources {
if r.Type != "newrelic_alert_policy_channel" {
continue
}
ids, err := parseIDs(r.Primary.ID, 2)
if err != nil {
return err
}
policyID := ids[0]
channelID := ids[1]
exists, err := policyChannelExists(client, policyID, channelID)
if err != nil {
return err
}
if exists {
return fmt.Errorf("Resource still exists")
}
}
return nil
}
func testAccCheckNewRelicAlertPolicyChannelExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No resource ID is set")
}
client := testAccProvider.Meta().(*newrelic.Client)
ids, err := parseIDs(rs.Primary.ID, 2)
if err != nil {
return err
}
policyID := ids[0]
channelID := ids[1]
exists, err := policyChannelExists(client, policyID, channelID)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("Resource not found: %v", rs.Primary.ID)
}
return nil
}
}
func testAccCheckNewRelicAlertPolicyChannelConfig(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "foo" {
name = "tf-test-%[1]s"
}
resource "newrelic_alert_channel" "foo" {
name = "tf-test-%[1]s"
type = "email"
configuration = {
recipients = "foo@example.com"
include_json_attachment = "1"
}
}
resource "newrelic_alert_policy_channel" "foo" {
policy_id = "${newrelic_alert_policy.foo.id}"
channel_id = "${newrelic_alert_channel.foo.id}"
}
`, rName)
}
func testAccCheckNewRelicAlertPolicyChannelConfigUpdated(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "bar" {
name = "tf-test-updated-%[1]s"
}
resource "newrelic_alert_channel" "foo" {
name = "tf-test-updated-%[1]s"
type = "email"
configuration = {
recipients = "bar@example.com"
include_json_attachment = "0"
}
}
resource "newrelic_alert_policy_channel" "foo" {
policy_id = "${newrelic_alert_policy.bar.id}"
channel_id = "${newrelic_alert_channel.foo.id}"
}
`, rName)
}

@@ -0,0 +1,112 @@
package newrelic
import (
"fmt"
"strconv"
"testing"
"github.com/hashicorp/terraform/helper/acctest"
"github.com/hashicorp/terraform/helper/resource"
"github.com/hashicorp/terraform/terraform"
newrelic "github.com/paultyng/go-newrelic/api"
)
func TestAccNewRelicAlertPolicy_Basic(t *testing.T) {
rName := acctest.RandString(5)
resource.Test(t, resource.TestCase{
PreCheck: func() { testAccPreCheck(t) },
Providers: testAccProviders,
CheckDestroy: testAccCheckNewRelicAlertPolicyDestroy,
Steps: []resource.TestStep{
resource.TestStep{
Config: testAccCheckNewRelicAlertPolicyConfig(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_policy.foo", "incident_preference", "PER_POLICY"),
),
},
resource.TestStep{
Config: testAccCheckNewRelicAlertPolicyConfigUpdated(rName),
Check: resource.ComposeTestCheckFunc(
testAccCheckNewRelicAlertPolicyExists("newrelic_alert_policy.foo"),
resource.TestCheckResourceAttr(
"newrelic_alert_policy.foo", "name", fmt.Sprintf("tf-test-updated-%s", rName)),
resource.TestCheckResourceAttr(
"newrelic_alert_policy.foo", "incident_preference", "PER_CONDITION"),
),
},
},
})
}
func testAccCheckNewRelicAlertPolicyDestroy(s *terraform.State) error {
client := testAccProvider.Meta().(*newrelic.Client)
for _, r := range s.RootModule().Resources {
if r.Type != "newrelic_alert_policy" {
continue
}
id, err := strconv.ParseInt(r.Primary.ID, 10, 32)
if err != nil {
return err
}
_, err = client.GetAlertPolicy(int(id))
if err == nil {
return fmt.Errorf("Policy still exists")
}
}
return nil
}
func testAccCheckNewRelicAlertPolicyExists(n string) resource.TestCheckFunc {
return func(s *terraform.State) error {
rs, ok := s.RootModule().Resources[n]
if !ok {
return fmt.Errorf("Not found: %s", n)
}
if rs.Primary.ID == "" {
return fmt.Errorf("No policy ID is set")
}
client := testAccProvider.Meta().(*newrelic.Client)
id, err := strconv.ParseInt(rs.Primary.ID, 10, 32)
if err != nil {
return err
}
found, err := client.GetAlertPolicy(int(id))
if err != nil {
return err
}
if strconv.Itoa(found.ID) != rs.Primary.ID {
return fmt.Errorf("Policy not found: %v - %v", rs.Primary.ID, found)
}
return nil
}
}
func testAccCheckNewRelicAlertPolicyConfig(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "foo" {
name = "tf-test-%s"
}
`, rName)
}
func testAccCheckNewRelicAlertPolicyConfigUpdated(rName string) string {
return fmt.Sprintf(`
resource "newrelic_alert_policy" "foo" {
name = "tf-test-updated-%s"
incident_preference = "PER_CONDITION"
}
`, rName)
}

@@ -0,0 +1,43 @@
package newrelic
import (
"fmt"
"github.com/hashicorp/terraform/helper/schema"
)
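// float64Gte returns a SchemaValidateFunc that checks a float64 value is >= gte.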
func float64Gte(gte float64) schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(float64)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be float64", k))
return
}
if v >= gte {
return
}
es = append(es, fmt.Errorf("expected %s to be greater than or equal to %v, got %v", k, gte, v))
return
}
}
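// intInSlice returns a SchemaValidateFunc that checks an int value is one of valid.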
func intInSlice(valid []int) schema.SchemaValidateFunc {
return func(i interface{}, k string) (s []string, es []error) {
v, ok := i.(int)
if !ok {
es = append(es, fmt.Errorf("expected type of %s to be int", k))
return
}
for _, p := range valid {
if v == p {
return
}
}
es = append(es, fmt.Errorf("expected %s to be one of %v, got %v", k, valid, v))
return
}
}

@@ -0,0 +1,81 @@
package newrelic
import (
"regexp"
"testing"
"github.com/hashicorp/terraform/helper/schema"
)
type testCase struct {
val interface{}
f schema.SchemaValidateFunc
expectedErr *regexp.Regexp
}
func TestValidationIntInSlice(t *testing.T) {
runTestCases(t, []testCase{
{
val: 2,
f: intInSlice([]int{1, 2, 3}),
},
{
val: 4,
f: intInSlice([]int{1, 2, 3}),
expectedErr: regexp.MustCompile("expected [\\w]+ to be one of \\[1 2 3\\], got 4"),
},
{
val: "foo",
f: intInSlice([]int{1, 2, 3}),
expectedErr: regexp.MustCompile("expected type of [\\w]+ to be int"),
},
})
}
func TestValidationFloat64Gte(t *testing.T) {
runTestCases(t, []testCase{
{
val: 1.1,
f: float64Gte(1.1),
},
{
val: 1.2,
f: float64Gte(1.1),
},
{
val: "foo",
f: float64Gte(1.1),
expectedErr: regexp.MustCompile("expected type of [\\w]+ to be float64"),
},
{
val: 0.1,
f: float64Gte(1.1),
expectedErr: regexp.MustCompile("expected [\\w]+ to be greater than or equal to 1.1, got 0.1"),
},
})
}
func runTestCases(t *testing.T, cases []testCase) {
matchErr := func(errs []error, r *regexp.Regexp) bool {
// err must match one provided
for _, err := range errs {
if r.MatchString(err.Error()) {
return true
}
}
return false
}
for i, tc := range cases {
_, errs := tc.f(tc.val, "test_property")
if len(errs) == 0 && tc.expectedErr == nil {
continue
}
if !matchErr(errs, tc.expectedErr) {
t.Fatalf("expected test case %d to produce error matching \"%s\", got %v", i, tc.expectedErr, errs)
}
}
}

@@ -36,6 +36,7 @@ import (
logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries"
mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun"
mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql"
newrelicprovider "github.com/hashicorp/terraform/builtin/providers/newrelic"
nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad"
nullprovider "github.com/hashicorp/terraform/builtin/providers/null"
openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack"
@@ -99,6 +100,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{
"logentries": logentriesprovider.Provider,
"mailgun": mailgunprovider.Provider,
"mysql": mysqlprovider.Provider,
"newrelic": newrelicprovider.Provider,
"nomad": nomadprovider.Provider,
"null": nullprovider.Provider,
"openstack": openstackprovider.Provider,

vendor/github.com/newrelic/go-agent/CHANGELOG.md (generated, vendored; 155 lines)

@@ -0,0 +1,155 @@
## ChangeLog
## 1.5.0
* Added support for Windows. Thanks to @ianomad and @lvxv for the contributions.
* The number of heap objects allocated is recorded in the
`Memory/Heap/AllocatedObjects` metric. This will soon be displayed on the "Go
runtime" page.
* If the [DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment)
fields `Host` and `PortPathOrID` are not provided, they will no longer appear
as `"unknown"` in transaction traces and slow query traces.
* Stack traces will now be nicely aligned in the APM UI.
## 1.4.0
* Added support for slow query traces. Slow datastore segments will now
generate slow query traces viewable on the datastore tab. These traces include
a stack trace and help you to debug slow datastore activity.
[Slow Query Documentation](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/viewing-slow-query-details)
* Added new
[DatastoreSegment](https://godoc.org/github.com/newrelic/go-agent#DatastoreSegment)
fields `ParameterizedQuery`, `QueryParameters`, `Host`, `PortPathOrID`, and
`DatabaseName`. These fields will be shown in transaction traces and in slow
query traces.
## 1.3.0
* Breaking Change: Added a timeout parameter to the `Application.Shutdown` method.
## 1.2.0
* Added support for instrumenting short-lived processes:
* The new `Application.Shutdown` method allows applications to report
data to New Relic without waiting a full minute.
* The new `Application.WaitForConnection` method allows your process to
defer instrumentation until the application is connected and ready to
gather data.
* Full documentation here: [application.go](application.go)
* Example short-lived process: [examples/short-lived-process/main.go](examples/short-lived-process/main.go)
* Error metrics are no longer created when `ErrorCollector.Enabled = false`.
* Added support for [github.com/mgutz/logxi](github.com/mgutz/logxi). See
[_integrations/nrlogxi/v1/nrlogxi.go](_integrations/nrlogxi/v1/nrlogxi.go).
* Fixed bug where Transaction Trace thresholds based upon Apdex were not being
applied to background transactions.
## 1.1.0
* Added support for Transaction Traces.
* Stack trace filenames have been shortened: anything preceding the first
`/src/` is now removed.
## 1.0.0
* Removed `BetaToken` from the `Config` structure.
* Breaking Datastore Change: `datastore` package contents moved to top level
`newrelic` package. `datastore.MySQL` has become `newrelic.DatastoreMySQL`.
* Breaking Attributes Change: `attributes` package contents moved to top
level `newrelic` package. `attributes.ResponseCode` has become
`newrelic.AttributeResponseCode`. Some attribute name constants have been
shortened.
* Added "runtime.NumCPU" to the environment tab. Thanks sergeylanzman for the
contribution.
* Prefixed the environment tab values "Compiler", "GOARCH", "GOOS", and
"Version" with "runtime.".
## 0.8.0
* Breaking Segments API Changes: The segments API has been rewritten with the
goal of being easier to use and to avoid nil Transaction checks. See:
* [segments.go](segments.go)
* [examples/server/main.go](examples/server/main.go)
* [GUIDE.md#segments](GUIDE.md#segments)
* Updated LICENSE.txt with contribution information.
## 0.7.1
* Fixed a bug causing the `Config` to fail to serialize into JSON when the
`Transport` field was populated.
## 0.7.0
* Eliminated `api`, `version`, and `log` packages. `Version`, `Config`,
`Application`, and `Transaction` now live in the top level `newrelic` package.
If you imported the `attributes` or `datastore` packages then you will need
to remove `api` from the import path.
* Breaking Logging Changes
Logging is no longer controlled though a single global. Instead, logging is
configured on a per-application basis with the new `Config.Logger` field. The
logger is an interface described in [log.go](log.go). See
[GUIDE.md#logging](GUIDE.md#logging).
## 0.6.1
* No longer create "GC/System/Pauses" metric if no GC pauses happened.
## 0.6.0
* Introduced beta token to support our beta program.
* Rename `Config.Development` to `Config.Enabled` (and change boolean
direction).
* Fixed a bug where exclusive time could be incorrect if segments were not
ended.
* Fix unit tests broken in 1.6.
* In `Config.Enabled = false` mode, the license must be the proper length or empty.
* Added runtime statistics for CPU/memory usage, garbage collection, and number
of goroutines.
## 0.5.0
* Added segment timing methods to `Transaction`. These methods must only be
used in a single goroutine.
* The license length check will not be performed in `Development` mode.
* Rename `SetLogFile` to `SetFile` to reduce redundancy.
* Added `DebugEnabled` logging guard to reduce overhead.
* `Transaction` now implements an `Ignore` method which will prevent
any of the transaction's data from being recorded.
* `Transaction` now implements a subset of the interfaces
`http.CloseNotifier`, `http.Flusher`, `http.Hijacker`, and `io.ReaderFrom`
to match the behavior of its wrapped `http.ResponseWriter`.
* Changed project name from `go-sdk` to `go-agent`.
## 0.4.0
* Queue time support added: if the inbound request contains an
`"X-Request-Start"` or `"X-Queue-Start"` header with a unix timestamp, the
agent will report queue time metrics. Queue time will appear on the
application overview chart. The timestamp may be in fractional seconds,
milliseconds, or microseconds: the agent will deduce the correct units.

vendor/github.com/newrelic/go-agent/CONTRIBUTING.md (generated, vendored; 9 lines)

@@ -0,0 +1,9 @@
# Contributing
You are welcome to send pull requests to us. By doing so you agree that you are
granting New Relic a non-exclusive, non-revokable, no-cost license to use the
code, algorithms, patents, and ideas in that code in our products if we so
choose. You also agree the code is provided as-is and you provide no warranties
as to its fitness or correctness for any purpose.
* [LICENSE.txt](LICENSE.txt)

vendor/github.com/newrelic/go-agent/GUIDE.md (generated, vendored; 325 lines)

@@ -0,0 +1,325 @@
# New Relic Go Agent Guide
* [Installation](#installation)
* [Config and Application](#config-and-application)
* [Logging](#logging)
* [logrus](#logrus)
* [Transactions](#transactions)
* [Segments](#segments)
* [Datastore Segments](#datastore-segments)
* [External Segments](#external-segments)
* [Attributes](#attributes)
* [Request Queuing](#request-queuing)
## Installation
Installing the Go Agent is the same as installing any other Go library. The
simplest way is to run:
```
go get github.com/newrelic/go-agent
```
Then import the `github.com/newrelic/go-agent` package in your application.
## Config and Application
* [config.go](config.go)
* [application.go](application.go)
In your `main` function or in an `init` block:
```go
config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__")
app, err := newrelic.NewApplication(config)
```
Find your application in the New Relic UI. Click on it to see the Go runtime
tab that shows information about goroutine counts, garbage collection, memory,
and CPU usage.
If you are working in a development environment or running unit tests, you may
not want the Go Agent to spawn goroutines or report to New Relic. You're in
luck! Set the config's `Enabled` field to false. This makes the license key
optional.
```go
config := newrelic.NewConfig("Your Application Name", "")
config.Enabled = false
app, err := newrelic.NewApplication(config)
```
## Logging
* [log.go](log.go)
The agent's logging system is designed to be easily extensible. By default, no
logging will occur. To enable logging, assign the `Config.Logger` field to
something implementing the `Logger` interface. A basic logging
implementation is included.
To log at debug level to standard out, set:
```go
config.Logger = newrelic.NewDebugLogger(os.Stdout)
```
To log at info level to a file, set:
```go
w, err := os.OpenFile("my_log_file", os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)
if nil == err {
config.Logger = newrelic.NewLogger(w)
}
```
### logrus
* [_integrations/nrlogrus/nrlogrus.go](_integrations/nrlogrus/nrlogrus.go)
If you are using `logrus` and would like to send the agent's log messages to its
standard logger, import the
`github.com/newrelic/go-agent/_integrations/nrlogrus` package, then set:
```go
config.Logger = nrlogrus.StandardLogger()
```
## Transactions
* [transaction.go](transaction.go)
* [More info on Transactions](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/transactions-page)
Transactions time requests and background tasks. Each transaction should only
be used in a single goroutine. Start a new transaction when you spawn a new
goroutine.
The simplest way to create transactions is to use
`Application.StartTransaction` and `Transaction.End`.
```go
txn := app.StartTransaction("transactionName", responseWriter, request)
defer txn.End()
```
The response writer and request parameters are optional. Leave them `nil` to
instrument a background task.
```go
txn := app.StartTransaction("backgroundTask", nil, nil)
defer txn.End()
```
The transaction has helpful methods like `NoticeError` and `SetName`.
See more in [transaction.go](transaction.go).
If you are using the `http` standard library package, use `WrapHandle` and
`WrapHandleFunc`. These wrappers automatically start and end transactions with
the request and response writer. See [instrumentation.go](instrumentation.go).
```go
http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler))
```
To access the transaction in your handler, use type assertion on the response
writer passed to the handler.
```go
func myHandler(w http.ResponseWriter, r *http.Request) {
if txn, ok := w.(newrelic.Transaction); ok {
txn.NoticeError(errors.New("my error message"))
}
}
```
## Segments
* [segments.go](segments.go)
Find out where the time in your transactions is being spent! Each transaction
should only track segments in a single goroutine.
`Segment` is used to instrument functions, methods, and blocks of code. A
segment begins when its `StartTime` field is populated, and finishes when its
`End` method is called.
```go
segment := newrelic.Segment{}
segment.Name = "mySegmentName"
segment.StartTime = newrelic.StartSegmentNow(txn)
// ... code you want to time here ...
segment.End()
```
`StartSegment` is a convenient helper. It creates a segment and starts it:
```go
segment := newrelic.StartSegment(txn, "mySegmentName")
// ... code you want to time here ...
segment.End()
```
Timing a function is easy using `StartSegment` and `defer`. Just add the
following line to the beginning of that function:
```go
defer newrelic.StartSegment(txn, "mySegmentName").End()
```
Segments may be nested. The segment being ended must be the most recently
started segment.
```go
s1 := newrelic.StartSegment(txn, "outerSegment")
s2 := newrelic.StartSegment(txn, "innerSegment")
// s2 must be ended before s1
s2.End()
s1.End()
```
A zero value segment may safely be ended. Therefore, the following code
is safe even if the conditional fails:
```go
var s newrelic.Segment
if txn, ok := w.(newrelic.Transaction); ok {
s.StartTime = newrelic.StartSegmentNow(txn)
}
// ... code you wish to time here ...
s.End()
```
### Datastore Segments
Datastore segments appear in the transaction "Breakdown table" and in the
"Databases" tab.
* [datastore.go](datastore.go)
* [More info on Databases tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/databases-slow-queries-page)
Datastore segments are instrumented using `DatastoreSegment`. Just like basic
segments, datastore segments begin when the `StartTime` field is populated and
finish when the `End` method is called. Here is an example:
```go
s := newrelic.DatastoreSegment{
// Product is the datastore type. See the constants in datastore.go.
Product: newrelic.DatastoreMySQL,
// Collection is the table or group.
Collection: "my_table",
// Operation is the relevant action, e.g. "SELECT" or "GET".
Operation: "SELECT",
}
s.StartTime = newrelic.StartSegmentNow(txn)
// ... make the datastore call
s.End()
```
This may be combined into a single line when instrumenting a datastore call
that spans an entire function call:
```go
defer newrelic.DatastoreSegment{
StartTime: newrelic.StartSegmentNow(txn),
Product: newrelic.DatastoreMySQL,
Collection: "my_table",
Operation: "SELECT",
}.End()
```
### External Segments
External segments appear in the transaction "Breakdown table" and in the
"External services" tab.
* [More info on External Services tab](https://docs.newrelic.com/docs/apm/applications-menu/monitoring/external-services-page)
External segments are instrumented using `ExternalSegment`. Populate either the
`URL` or `Request` field to indicate the endpoint. Here is an example:
```go
func external(txn newrelic.Transaction, url string) (*http.Response, error) {
defer newrelic.ExternalSegment{
StartTime: newrelic.StartSegmentNow(txn),
URL: url,
}.End()
return http.Get(url)
}
```
We recommend using the `Request` and `Response` fields since they provide more
information about the external call. The `StartExternalSegment` helper is
useful when the request is available. This function may be modified in the
future to add headers that will trace activity between applications that are
instrumented by New Relic.
```go
func external(txn newrelic.Transaction, req *http.Request) (*http.Response, error) {
s := newrelic.StartExternalSegment(txn, req)
response, err := http.DefaultClient.Do(req)
s.Response = response
s.End()
return response, err
}
```
`NewRoundTripper` is another useful helper. As with all segments, the round
tripper returned **must** only be used in the same goroutine as the transaction.
```go
client := &http.Client{}
client.Transport = newrelic.NewRoundTripper(txn, nil)
resp, err := client.Get("http://example.com/")
```
## Attributes
Attributes add context to errors and allow you to filter performance data
in Insights.
You may add them using the `Transaction.AddAttribute` method.
```go
txn.AddAttribute("key", "value")
txn.AddAttribute("product", "widget")
txn.AddAttribute("price", 19.99)
txn.AddAttribute("importantCustomer", true)
```
* [More info on Custom Attributes](https://docs.newrelic.com/docs/insights/new-relic-insights/decorating-events/insights-custom-attributes)
Some attributes are recorded automatically. These are called agent attributes.
They are listed here:
* [attributes.go](attributes.go)
To disable one of these agent attributes, `AttributeResponseCode` for
example, modify the config like this:
```go
config.Attributes.Exclude = append(config.Attributes.Exclude, newrelic.AttributeResponseCode)
```
* [More info on Agent Attributes](https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes)
## Custom Events
You may track arbitrary events using custom Insights events.
```go
app.RecordCustomEvent("MyEventType", map[string]interface{}{
	"myString": "hello",
	"myFloat":  0.603,
	"myInt":    123,
	"myBool":   true,
})
```
## Request Queuing
If you are running a load balancer or reverse proxy, you may configure it to
add an `X-Queue-Start` header with a Unix timestamp. This will create a band
on the application overview chart showing queue time (see the sketch below).
* [More info on Request Queuing](https://docs.newrelic.com/docs/apm/applications-menu/features/request-queuing-tracking-front-end-time)
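If your frontend is itself a small Go service, a minimal sketch of adding the
header might look like the following. This is purely illustrative and not part
of the agent; the `t=<microseconds>` value format and the backend address are
assumptions, so verify the format your load balancer actually emits.
```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httputil"
	"net/url"
	"time"
)

func main() {
	// Hypothetical instrumented backend address.
	backend, err := url.Parse("http://localhost:8000")
	if err != nil {
		panic(err)
	}
	proxy := httputil.NewSingleHostReverseProxy(backend)
	http.ListenAndServe(":8080", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Stamp the moment the request entered the frontend so the
		// agent can compute queue time.
		micros := time.Now().UnixNano() / int64(time.Microsecond)
		r.Header.Set("X-Queue-Start", fmt.Sprintf("t=%d", micros))
		proxy.ServeHTTP(w, r)
	}))
}
```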
vendor/github.com/newrelic/go-agent/LICENSE.txt generated vendored Normal file
@ -0,0 +1,50 @@
This product includes source derived from 'go' by The Go Authors, distributed
under the following BSD license:
https://github.com/golang/go/blob/master/LICENSE
-------------------------------------------------------------------------------
All components of this product are Copyright (c) 2016 New Relic, Inc. All
rights reserved.
Certain inventions disclosed in this file may be claimed within patents owned or
patent applications filed by New Relic, Inc. or third parties.
Subject to the terms of this notice, New Relic grants you a nonexclusive,
nontransferable license, without the right to sublicense, to (a) install and
execute one copy of these files on any number of workstations owned or
controlled by you and (b) distribute verbatim copies of these files to third
parties. You may install, execute, and distribute these files and their
contents only in conjunction with your direct use of New Relic's services.
These files and their contents shall not be used in conjunction with any other
product or software, including but not limited to those that may compete with
any New Relic product, feature, or software. As a condition to the foregoing
grant, you must provide this notice along with each copy you distribute and you
must not remove, alter, or obscure this notice. In the event you submit or
provide any feedback, code, pull requests, or suggestions to New Relic you
hereby grant New Relic a worldwide, non-exclusive, irrevocable, transferrable,
fully paid-up license to use the code, algorithms, patents, and ideas therein in
our products.
All other use, reproduction, modification, distribution, or other exploitation
of these files is strictly prohibited, except as may be set forth in a separate
written license agreement between you and New Relic. The terms of any such
license agreement will control over this notice. The license stated above will
be automatically terminated and revoked if you exceed its scope or violate any
of the terms of this notice.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of New Relic, except as required for reasonable
and customary use in describing the origin of this file and reproducing the
content of this notice. You may not mark or brand this file with any trade
name, trademarks, service marks, or product names other than the original brand
(if any) provided by New Relic.
Unless otherwise expressly agreed by New Relic in a separate written license
agreement, these files are provided AS IS, WITHOUT WARRANTY OF ANY KIND,
including without any implied warranties of MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE, TITLE, or NON-INFRINGEMENT. As a condition to your use of
these files, you are solely responsible for such use. New Relic will have no
liability to you for direct, indirect, consequential, incidental, special, or
punitive damages or for lost profits or data.
vendor/github.com/newrelic/go-agent/README.md generated vendored Normal file
@ -0,0 +1,157 @@
# New Relic Go Agent
## Description
The New Relic Go Agent allows you to monitor your Go applications with New
Relic. It helps you track transactions, outbound requests, database calls, and
other parts of your Go application's behavior and provides a running overview of
garbage collection, goroutine activity, and memory use.
## Requirements
Go 1.3+ is required, due to the use of http.Client's Timeout field.
Linux, OS X, and Windows (Vista, Server 2008 and later) are supported.
## Getting Started
Here are the basic steps to instrumenting your application. For more
information, see [GUIDE.md](GUIDE.md).
#### Step 0: Installation
Installing the Go Agent is the same as installing any other Go library. The
simplest way is to run:
```
go get github.com/newrelic/go-agent
```
Then import the `github.com/newrelic/go-agent` package in your application.
#### Step 1: Create a Config and an Application
In your `main` function or an `init` block:
```go
config := newrelic.NewConfig("Your Application Name", "__YOUR_NEW_RELIC_LICENSE_KEY__")
app, err := newrelic.NewApplication(config)
```
[more info](GUIDE.md#config-and-application), [application.go](application.go),
[config.go](config.go)
#### Step 2: Add Transactions
Transactions time requests and background tasks. Use `WrapHandle` and
`WrapHandleFunc` to create transactions for requests handled by the `http`
standard library package.
```go
http.HandleFunc(newrelic.WrapHandleFunc(app, "/users", usersHandler))
```
Alternatively, create transactions directly using the application's
`StartTransaction` method:
```go
txn := app.StartTransaction("myTxn", optionalResponseWriter, optionalRequest)
defer txn.End()
```
[more info](GUIDE.md#transactions), [transaction.go](transaction.go)
#### Step 3: Instrument Segments
Segments show you where time in your transactions is being spent. At the
beginning of important functions, add:
```go
defer newrelic.StartSegment(txn, "mySegmentName").End()
```
[more info](GUIDE.md#segments), [segments.go](segments.go)
## Runnable Example
[examples/server/main.go](./examples/server/main.go) is an example that will appear as "My Go
Application" in your New Relic applications list. To run it:
```
env NEW_RELIC_LICENSE_KEY=__YOUR_NEW_RELIC_LICENSE_KEY__ \
  go run examples/server/main.go
```
Endpoints exposed include [http://localhost:8000/](http://localhost:8000/)
and [http://localhost:8000/notice_error](http://localhost:8000/notice_error).
## Basic Example
Before Instrumentation
```go
package main

import (
	"io"
	"net/http"
)

func helloHandler(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "hello, world")
}

func main() {
	http.HandleFunc("/", helloHandler)
	http.ListenAndServe(":8000", nil)
}
```
After Instrumentation
```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"

	"github.com/newrelic/go-agent"
)

func helloHandler(w http.ResponseWriter, r *http.Request) {
	io.WriteString(w, "hello, world")
}

func main() {
	// Create a config. You need to provide the desired application name
	// and your New Relic license key.
	cfg := newrelic.NewConfig("My Go Application", "__YOUR_NEW_RELIC_LICENSE_KEY__")

	// Create an application. This represents an application in the New
	// Relic UI.
	app, err := newrelic.NewApplication(cfg)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Wrap helloHandler. The performance of this handler will be recorded.
	http.HandleFunc(newrelic.WrapHandleFunc(app, "/", helloHandler))
	http.ListenAndServe(":8000", nil)
}
```
## Support
You can find more detailed documentation [in the guide](GUIDE.md).
If you can't find what you're looking for there, reach out to us on our [support
site](http://support.newrelic.com/) or our [community
forum](http://forum.newrelic.com) and we'll be happy to help you.
Find a bug? Contact us via [support.newrelic.com](http://support.newrelic.com/),
or email support@newrelic.com.
vendor/github.com/newrelic/go-agent/application.go generated vendored Normal file
@ -0,0 +1,58 @@
package newrelic
import (
"net/http"
"time"
)
// Application represents your application.
type Application interface {
// StartTransaction begins a Transaction.
// * The Transaction should only be used in a single goroutine.
// * This method never returns nil.
// * If an http.Request is provided then the Transaction is considered
// a web transaction.
// * If an http.ResponseWriter is provided then the Transaction can be
// used in its place. This allows instrumentation of the response
// code and response headers.
StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction
// RecordCustomEvent adds a custom event to the application. This
// feature is incompatible with high security mode.
//
// eventType must consist of alphanumeric characters, underscores, and
// colons, and must contain fewer than 255 bytes.
//
// Each value in the params map must be a number, string, or boolean.
// Keys must be less than 255 bytes. The params map may not contain
// more than 64 attributes. For more information, and a set of
// restricted keywords, see:
//
// https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents
RecordCustomEvent(eventType string, params map[string]interface{}) error
// WaitForConnection blocks until the application is connected, is
// incapable of being connected, or the timeout has been reached. This
// method is useful for short-lived processes since the application will
// not gather data until it is connected. nil is returned if the
// application is connected successfully.
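	//
	// For example, a short-lived process might wait briefly before
	// doing its instrumented work (a sketch; the five second timeout
	// is an arbitrary choice):
	//
	//	if err := app.WaitForConnection(5 * time.Second); nil != err {
	//		fmt.Println(err)
	//	}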
WaitForConnection(timeout time.Duration) error
// Shutdown flushes data to New Relic's servers and stops all
// agent-related goroutines managing this application. After Shutdown
// is called, the application is disabled and no more data will be
// collected. This method will block until all final data is sent to
// New Relic or the timeout has elapsed.
Shutdown(timeout time.Duration)
}
// NewApplication creates an Application and spawns goroutines to manage the
// aggregation and harvesting of data. On success, a non-nil Application and a
// nil error are returned. On failure, a nil Application and a non-nil error
// are returned.
//
// Applications do not share global state (other than the shared log.Logger).
// Therefore, it is safe to create multiple applications.
func NewApplication(c Config) (Application, error) {
return newApp(c)
}
vendor/github.com/newrelic/go-agent/attributes.go generated vendored Normal file
@ -0,0 +1,42 @@
package newrelic
// This file contains the names of the automatically captured attributes.
// Attributes are key value pairs attached to transaction events, error events,
// and traced errors. You may add your own attributes using the
// Transaction.AddAttribute method (see transaction.go).
//
// These attribute names are exposed here to facilitate configuration.
//
// For more information, see:
// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/agent-attributes
// Attributes destined for Transaction Events and Errors:
const (
// AttributeResponseCode is the response status code for a web request.
AttributeResponseCode = "httpResponseCode"
// AttributeRequestMethod is the request's method.
AttributeRequestMethod = "request.method"
// AttributeRequestAccept is the request's "Accept" header.
AttributeRequestAccept = "request.headers.accept"
// AttributeRequestContentType is the request's "Content-Type" header.
AttributeRequestContentType = "request.headers.contentType"
// AttributeRequestContentLength is the request's "Content-Length" header.
AttributeRequestContentLength = "request.headers.contentLength"
// AttributeRequestHost is the request's "Host" header.
AttributeRequestHost = "request.headers.host"
// AttributeResponseContentType is the response "Content-Type" header.
AttributeResponseContentType = "response.headers.contentType"
// AttributeResponseContentLength is the response "Content-Length" header.
AttributeResponseContentLength = "response.headers.contentLength"
// AttributeHostDisplayName contains the value of Config.HostDisplayName.
AttributeHostDisplayName = "host.displayName"
)
// Attributes destined for Errors:
const (
// AttributeRequestUserAgent is the request's "User-Agent" header.
AttributeRequestUserAgent = "request.headers.User-Agent"
// AttributeRequestReferer is the request's "Referer" header. Query
// string parameters are removed.
AttributeRequestReferer = "request.headers.referer"
)
vendor/github.com/newrelic/go-agent/config.go generated vendored Normal file
@ -0,0 +1,257 @@
package newrelic
import (
"errors"
"fmt"
"net/http"
"strings"
"time"
)
// Config contains Application and Transaction behavior settings.
// Use NewConfig to create a Config with proper defaults.
type Config struct {
// AppName is used by New Relic to link data across servers.
//
// https://docs.newrelic.com/docs/apm/new-relic-apm/installation-configuration/naming-your-application
AppName string
// License is your New Relic license key.
//
// https://docs.newrelic.com/docs/accounts-partnerships/accounts/account-setup/license-key
License string
// Logger controls go-agent logging. See log.go.
Logger Logger
// Enabled determines whether the agent will communicate with the New
// Relic servers and spawn goroutines. Setting this to be false can be
// useful in testing and staging situations.
Enabled bool
// Labels are key value pairs used to roll up applications into specific
// categories.
//
// https://docs.newrelic.com/docs/apm/new-relic-apm/maintenance/labels-categories-organizing-your-apps-servers
Labels map[string]string
// HighSecurity guarantees that certain agent settings can not be made
// more permissive. This setting must match the corresponding account
// setting in the New Relic UI.
//
// https://docs.newrelic.com/docs/accounts-partnerships/accounts/security/high-security
HighSecurity bool
// CustomInsightsEvents controls the behavior of
// Application.RecordCustomEvent.
//
// https://docs.newrelic.com/docs/insights/new-relic-insights/adding-querying-data/inserting-custom-events-new-relic-apm-agents
CustomInsightsEvents struct {
// Enabled controls whether RecordCustomEvent will collect
// custom analytics events. High security mode overrides this
// setting.
Enabled bool
}
// TransactionEvents controls the behavior of transaction analytics
// events.
TransactionEvents struct {
// Enabled controls whether transaction events are captured.
Enabled bool
// Attributes controls the attributes included with transaction
// events.
Attributes AttributeDestinationConfig
}
// ErrorCollector controls the capture of errors.
ErrorCollector struct {
// Enabled controls whether errors are captured. This setting
// affects both traced errors and error analytics events.
Enabled bool
// CaptureEvents controls whether error analytics events are
// captured.
CaptureEvents bool
// IgnoreStatusCodes controls which http response codes are
// automatically turned into errors. By default, response codes
// greater than or equal to 400, with the exception of 404, are
// turned into errors.
IgnoreStatusCodes []int
// Attributes controls the attributes included with errors.
Attributes AttributeDestinationConfig
}
// TransactionTracer controls the capture of transaction traces.
TransactionTracer struct {
// Enabled controls whether transaction traces are captured.
Enabled bool
// Threshold controls whether a transaction trace will be
// considered for capture. Of the traces exceeding the
// threshold, the slowest trace every minute is captured.
Threshold struct {
// If IsApdexFailing is true then the trace threshold is
// four times the apdex threshold.
IsApdexFailing bool
// If IsApdexFailing is false then this field is the
// threshold, otherwise it is ignored.
Duration time.Duration
}
// SegmentThreshold is the threshold at which segments will be
// added to the trace. Lowering this setting may increase
// overhead.
SegmentThreshold time.Duration
// StackTraceThreshold is the threshold at which segments will
// be given a stack trace in the transaction trace. Lowering
// this setting will drastically increase overhead.
StackTraceThreshold time.Duration
// Attributes controls the attributes included with transaction
// traces.
Attributes AttributeDestinationConfig
}
// HostDisplayName gives this server a recognizable name in the New
// Relic UI. This is an optional setting.
HostDisplayName string
// UseTLS controls whether http or https is used to send data to New
// Relic servers.
UseTLS bool
// Transport customizes http.Client communication with New Relic
// servers. This may be used to configure a proxy.
Transport http.RoundTripper
// Utilization controls the detection and gathering of system
// information.
Utilization struct {
// DetectAWS controls whether the Application attempts to detect
// AWS.
DetectAWS bool
// DetectDocker controls whether the Application attempts to
// detect Docker.
DetectDocker bool
// These settings provide system information when custom values
// are required.
LogicalProcessors int
TotalRAMMIB int
BillingHostname string
}
// DatastoreTracer controls behavior relating to datastore segments.
DatastoreTracer struct {
InstanceReporting struct {
Enabled bool
}
DatabaseNameReporting struct {
Enabled bool
}
QueryParameters struct {
Enabled bool
}
// SlowQuery controls the capture of slow query traces. Slow
// query traces show you instances of your slowest datastore
// segments.
SlowQuery struct {
Enabled bool
Threshold time.Duration
}
}
// Attributes controls the attributes included with errors and
// transaction events.
Attributes AttributeDestinationConfig
// RuntimeSampler controls the collection of runtime statistics like
// CPU/Memory usage, goroutine count, and GC pauses.
RuntimeSampler struct {
// Enabled controls whether runtime statistics are captured.
Enabled bool
}
}
// AttributeDestinationConfig controls the attributes included with errors and
// transaction events.
type AttributeDestinationConfig struct {
Enabled bool
Include []string
Exclude []string
}
// NewConfig creates a Config populated with the given appname, license,
// and expected default values.
func NewConfig(appname, license string) Config {
c := Config{}
c.AppName = appname
c.License = license
c.Enabled = true
c.Labels = make(map[string]string)
c.CustomInsightsEvents.Enabled = true
c.TransactionEvents.Enabled = true
c.TransactionEvents.Attributes.Enabled = true
c.HighSecurity = false
c.UseTLS = true
c.ErrorCollector.Enabled = true
c.ErrorCollector.CaptureEvents = true
c.ErrorCollector.IgnoreStatusCodes = []int{
http.StatusNotFound, // 404
}
c.ErrorCollector.Attributes.Enabled = true
c.Utilization.DetectAWS = true
c.Utilization.DetectDocker = true
c.Attributes.Enabled = true
c.RuntimeSampler.Enabled = true
c.TransactionTracer.Enabled = true
c.TransactionTracer.Threshold.IsApdexFailing = true
c.TransactionTracer.Threshold.Duration = 500 * time.Millisecond
c.TransactionTracer.SegmentThreshold = 2 * time.Millisecond
c.TransactionTracer.StackTraceThreshold = 500 * time.Millisecond
c.TransactionTracer.Attributes.Enabled = true
c.DatastoreTracer.InstanceReporting.Enabled = true
c.DatastoreTracer.DatabaseNameReporting.Enabled = true
c.DatastoreTracer.QueryParameters.Enabled = true
c.DatastoreTracer.SlowQuery.Enabled = true
c.DatastoreTracer.SlowQuery.Threshold = 10 * time.Millisecond
return c
}
const (
licenseLength = 40
appNameLimit = 3
)
// The following errors will be returned if your Config fails to validate.
var (
errLicenseLen = fmt.Errorf("license length is not %d", licenseLength)
errHighSecurityTLS = errors.New("high security requires TLS")
errAppNameMissing = errors.New("AppName required")
errAppNameLimit = fmt.Errorf("max of %d rollup application names", appNameLimit)
)
// Validate checks the config for improper fields. If the config is invalid,
// newrelic.NewApplication returns an error.
func (c Config) Validate() error {
if c.Enabled {
if len(c.License) != licenseLength {
return errLicenseLen
}
} else {
// The License may be empty when the agent is not enabled.
if len(c.License) != licenseLength && len(c.License) != 0 {
return errLicenseLen
}
}
if c.HighSecurity && !c.UseTLS {
return errHighSecurityTLS
}
if "" == c.AppName {
return errAppNameMissing
}
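	// AppName may hold at most appNameLimit rollup names separated by
	// semicolons.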
if strings.Count(c.AppName, ";") >= appNameLimit {
return errAppNameLimit
}
return nil
}
vendor/github.com/newrelic/go-agent/datastore.go generated vendored Normal file
@ -0,0 +1,27 @@
package newrelic
// DatastoreProduct encourages consistent metrics across New Relic agents. You
// may create your own if your datastore is not listed below.
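// For example (a hypothetical product name, purely illustrative):
//
//	const DatastoreRethinkDB newrelic.DatastoreProduct = "RethinkDB"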
type DatastoreProduct string
// Datastore names used across New Relic agents:
const (
DatastoreCassandra DatastoreProduct = "Cassandra"
DatastoreDerby = "Derby"
DatastoreElasticsearch = "Elasticsearch"
DatastoreFirebird = "Firebird"
DatastoreIBMDB2 = "IBMDB2"
DatastoreInformix = "Informix"
DatastoreMemcached = "Memcached"
DatastoreMongoDB = "MongoDB"
DatastoreMySQL = "MySQL"
DatastoreMSSQL = "MSSQL"
DatastoreOracle = "Oracle"
DatastorePostgres = "Postgres"
DatastoreRedis = "Redis"
DatastoreSolr = "Solr"
DatastoreSQLite = "SQLite"
DatastoreCouchDB = "CouchDB"
DatastoreRiak = "Riak"
DatastoreVoltDB = "VoltDB"
)
vendor/github.com/newrelic/go-agent/instrumentation.go generated vendored Normal file
@ -0,0 +1,68 @@
package newrelic
import "net/http"
// instrumentation.go contains helpers built on the lower level api.
// WrapHandle facilitates instrumentation of handlers registered with an
// http.ServeMux. For example, to instrument this code:
//
// http.Handle("/foo", fooHandler)
//
// Perform this replacement:
//
// http.Handle(newrelic.WrapHandle(app, "/foo", fooHandler))
//
// The Transaction is passed to the handler in place of the original
// http.ResponseWriter, so it can be accessed using type assertion.
// For example, to rename the transaction:
//
// // 'w' is the variable name of the http.ResponseWriter.
// if txn, ok := w.(newrelic.Transaction); ok {
// txn.SetName("other-name")
// }
//
func WrapHandle(app Application, pattern string, handler http.Handler) (string, http.Handler) {
return pattern, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
txn := app.StartTransaction(pattern, w, r)
defer txn.End()
handler.ServeHTTP(txn, r)
})
}
// WrapHandleFunc serves the same purpose as WrapHandle for functions registered
// with ServeMux.HandleFunc.
func WrapHandleFunc(app Application, pattern string, handler func(http.ResponseWriter, *http.Request)) (string, func(http.ResponseWriter, *http.Request)) {
p, h := WrapHandle(app, pattern, http.HandlerFunc(handler))
return p, func(w http.ResponseWriter, r *http.Request) { h.ServeHTTP(w, r) }
}
// NewRoundTripper creates an http.RoundTripper to instrument external requests.
// This RoundTripper must be used in the same goroutine as the other uses of the
// Transaction's SegmentTracer methods. http.DefaultTransport is used if an
// http.RoundTripper is not provided.
//
// client := &http.Client{}
// client.Transport = newrelic.NewRoundTripper(txn, nil)
// resp, err := client.Get("http://example.com/")
//
func NewRoundTripper(txn Transaction, original http.RoundTripper) http.RoundTripper {
return roundTripperFunc(func(request *http.Request) (*http.Response, error) {
segment := StartExternalSegment(txn, request)
if nil == original {
original = http.DefaultTransport
}
response, err := original.RoundTrip(request)
segment.Response = response
segment.End()
return response, err
})
}
type roundTripperFunc func(*http.Request) (*http.Response, error)
func (f roundTripperFunc) RoundTrip(r *http.Request) (*http.Response, error) { return f(r) }
@ -0,0 +1,122 @@
package internal
import (
"bytes"
"container/heap"
"github.com/newrelic/go-agent/internal/jsonx"
)
// eventStamp allows for uniform random sampling of events. When an event is
// created it is given an eventStamp. Whenever an event pool is full and events
// need to be dropped, the events with the lowest stamps are dropped.
type eventStamp float32
func eventStampCmp(a, b eventStamp) bool {
return a < b
}
type analyticsEvent struct {
stamp eventStamp
jsonWriter
}
type analyticsEventHeap []analyticsEvent
type analyticsEvents struct {
numSeen int
events analyticsEventHeap
failedHarvests int
}
func (events *analyticsEvents) NumSeen() float64 { return float64(events.numSeen) }
func (events *analyticsEvents) NumSaved() float64 { return float64(len(events.events)) }
func (h analyticsEventHeap) Len() int { return len(h) }
func (h analyticsEventHeap) Less(i, j int) bool { return eventStampCmp(h[i].stamp, h[j].stamp) }
func (h analyticsEventHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
// Push and Pop are unused: only heap.Init and heap.Fix are used.
func (h analyticsEventHeap) Push(x interface{}) {}
func (h analyticsEventHeap) Pop() interface{} { return nil }
func newAnalyticsEvents(max int) *analyticsEvents {
return &analyticsEvents{
numSeen: 0,
events: make(analyticsEventHeap, 0, max),
failedHarvests: 0,
}
}
func (events *analyticsEvents) addEvent(e analyticsEvent) {
events.numSeen++
if len(events.events) < cap(events.events) {
events.events = append(events.events, e)
if len(events.events) == cap(events.events) {
			// Delay heap initialization so that we can have
			// deterministic ordering for integration tests (when
			// the max is not reached).
heap.Init(events.events)
}
return
}
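	// The pool is full: keep the new event only if its stamp beats the
	// smallest stamp currently in the heap, in which case it replaces
	// that minimum event.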
if eventStampCmp(e.stamp, events.events[0].stamp) {
return
}
events.events[0] = e
heap.Fix(events.events, 0)
}
func (events *analyticsEvents) mergeFailed(other *analyticsEvents) {
fails := other.failedHarvests + 1
if fails >= failedEventsAttemptsLimit {
return
}
events.failedHarvests = fails
events.Merge(other)
}
func (events *analyticsEvents) Merge(other *analyticsEvents) {
allSeen := events.numSeen + other.numSeen
for _, e := range other.events {
events.addEvent(e)
}
events.numSeen = allSeen
}
func (events *analyticsEvents) CollectorJSON(agentRunID string) ([]byte, error) {
if 0 == events.numSeen {
return nil, nil
}
estimate := 256 * len(events.events)
buf := bytes.NewBuffer(make([]byte, 0, estimate))
buf.WriteByte('[')
jsonx.AppendString(buf, agentRunID)
buf.WriteByte(',')
buf.WriteByte('{')
buf.WriteString(`"reservoir_size":`)
jsonx.AppendUint(buf, uint64(cap(events.events)))
buf.WriteByte(',')
buf.WriteString(`"events_seen":`)
jsonx.AppendUint(buf, uint64(events.numSeen))
buf.WriteByte('}')
buf.WriteByte(',')
buf.WriteByte('[')
for i, e := range events.events {
if i > 0 {
buf.WriteByte(',')
}
e.WriteJSON(buf)
}
buf.WriteByte(']')
buf.WriteByte(']')
return buf.Bytes(), nil
}
vendor/github.com/newrelic/go-agent/internal/apdex.go generated vendored Normal file
@ -0,0 +1,48 @@
package internal
import "time"
// ApdexZone is a transaction classification.
type ApdexZone int
// https://en.wikipedia.org/wiki/Apdex
const (
ApdexNone ApdexZone = iota
ApdexSatisfying
ApdexTolerating
ApdexFailing
)
// ApdexFailingThreshold calculates the threshold at which the transaction is
// considered a failure.
func ApdexFailingThreshold(threshold time.Duration) time.Duration {
return 4 * threshold
}
// CalculateApdexZone calculates the apdex based on the transaction duration and
// threshold.
//
// Note that this does not take into account whether or not the transaction
// had an error. That is expected to be done by the caller.
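//
// For example, with a threshold of 500ms: a 300ms transaction is
// satisfying, a 1s transaction is tolerating, and a 3s transaction is
// failing (it exceeds the 4 * 500ms failing threshold).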
func CalculateApdexZone(threshold, duration time.Duration) ApdexZone {
if duration <= threshold {
return ApdexSatisfying
}
if duration <= ApdexFailingThreshold(threshold) {
return ApdexTolerating
}
return ApdexFailing
}
func (zone ApdexZone) label() string {
switch zone {
case ApdexSatisfying:
return "S"
case ApdexTolerating:
return "T"
case ApdexFailing:
return "F"
default:
return ""
}
}
@ -0,0 +1,572 @@
package internal
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"sort"
"strconv"
"strings"
)
// New agent attributes must be added in the following places:
// * Constants here.
// * Top level attributes.go file.
// * agentAttributes
// * agentAttributeDests
// * calculateAgentAttributeDests
// * writeAgentAttributes
const (
responseCode = "httpResponseCode"
requestMethod = "request.method"
requestAccept = "request.headers.accept"
requestContentType = "request.headers.contentType"
requestContentLength = "request.headers.contentLength"
requestHost = "request.headers.host"
responseContentType = "response.headers.contentType"
responseContentLength = "response.headers.contentLength"
hostDisplayName = "host.displayName"
requestUserAgent = "request.headers.User-Agent"
requestReferer = "request.headers.referer"
)
// https://source.datanerd.us/agents/agent-specs/blob/master/Agent-Attributes-PORTED.md
// AttributeDestinationConfig matches newrelic.AttributeDestinationConfig to
// avoid circular dependency issues.
type AttributeDestinationConfig struct {
Enabled bool
Include []string
Exclude []string
}
type destinationSet int
const (
destTxnEvent destinationSet = 1 << iota
destError
destTxnTrace
destBrowser
)
const (
destNone destinationSet = 0
// DestAll contains all destinations.
DestAll destinationSet = destTxnEvent | destTxnTrace | destError | destBrowser
)
const (
attributeWildcardSuffix = '*'
)
type attributeModifier struct {
match string // This will not contain a trailing '*'.
includeExclude
}
type byMatch []*attributeModifier
func (m byMatch) Len() int { return len(m) }
func (m byMatch) Swap(i, j int) { m[i], m[j] = m[j], m[i] }
func (m byMatch) Less(i, j int) bool { return m[i].match < m[j].match }
// AttributeConfig is created at application creation and shared between all
// transactions.
type AttributeConfig struct {
disabledDestinations destinationSet
exactMatchModifiers map[string]*attributeModifier
// Once attributeConfig is constructed, wildcardModifiers is sorted in
// lexicographical order. Modifiers appearing later have precedence
// over modifiers appearing earlier.
wildcardModifiers []*attributeModifier
agentDests agentAttributeDests
}
type includeExclude struct {
include destinationSet
exclude destinationSet
}
func modifierApply(m *attributeModifier, d destinationSet) destinationSet {
// Include before exclude, since exclude has priority.
d |= m.include
d &^= m.exclude
return d
}
func applyAttributeConfig(c *AttributeConfig, key string, d destinationSet) destinationSet {
// Important: The wildcard modifiers must be applied before the exact
// match modifiers, and the slice must be iterated in a forward
// direction.
for _, m := range c.wildcardModifiers {
if strings.HasPrefix(key, m.match) {
d = modifierApply(m, d)
}
}
if m, ok := c.exactMatchModifiers[key]; ok {
d = modifierApply(m, d)
}
d &^= c.disabledDestinations
return d
}
func addModifier(c *AttributeConfig, match string, d includeExclude) {
if "" == match {
return
}
exactMatch := true
if attributeWildcardSuffix == match[len(match)-1] {
exactMatch = false
match = match[0 : len(match)-1]
}
mod := &attributeModifier{
match: match,
includeExclude: d,
}
if exactMatch {
if m, ok := c.exactMatchModifiers[mod.match]; ok {
m.include |= mod.include
m.exclude |= mod.exclude
} else {
c.exactMatchModifiers[mod.match] = mod
}
} else {
for _, m := range c.wildcardModifiers {
// Important: Duplicate entries for the same match
// string would not work because exclude needs
// precedence over include.
if m.match == mod.match {
m.include |= mod.include
m.exclude |= mod.exclude
return
}
}
c.wildcardModifiers = append(c.wildcardModifiers, mod)
}
}
func processDest(c *AttributeConfig, dc *AttributeDestinationConfig, d destinationSet) {
if !dc.Enabled {
c.disabledDestinations |= d
}
for _, match := range dc.Include {
addModifier(c, match, includeExclude{include: d})
}
for _, match := range dc.Exclude {
addModifier(c, match, includeExclude{exclude: d})
}
}
// AttributeConfigInput is used as the input to CreateAttributeConfig: it
// transforms newrelic.Config settings into an AttributeConfig.
type AttributeConfigInput struct {
Attributes AttributeDestinationConfig
ErrorCollector AttributeDestinationConfig
TransactionEvents AttributeDestinationConfig
browserMonitoring AttributeDestinationConfig
TransactionTracer AttributeDestinationConfig
}
var (
sampleAttributeConfigInput = AttributeConfigInput{
Attributes: AttributeDestinationConfig{Enabled: true},
ErrorCollector: AttributeDestinationConfig{Enabled: true},
TransactionEvents: AttributeDestinationConfig{Enabled: true},
TransactionTracer: AttributeDestinationConfig{Enabled: true},
}
)
// CreateAttributeConfig creates a new AttributeConfig.
func CreateAttributeConfig(input AttributeConfigInput) *AttributeConfig {
c := &AttributeConfig{
exactMatchModifiers: make(map[string]*attributeModifier),
wildcardModifiers: make([]*attributeModifier, 0, 64),
}
processDest(c, &input.Attributes, DestAll)
processDest(c, &input.ErrorCollector, destError)
processDest(c, &input.TransactionEvents, destTxnEvent)
processDest(c, &input.TransactionTracer, destTxnTrace)
processDest(c, &input.browserMonitoring, destBrowser)
sort.Sort(byMatch(c.wildcardModifiers))
c.agentDests = calculateAgentAttributeDests(c)
return c
}
type userAttribute struct {
value interface{}
dests destinationSet
}
// Attributes are key value pairs attached to the various collected data types.
type Attributes struct {
config *AttributeConfig
user map[string]userAttribute
Agent agentAttributes
}
type agentAttributes struct {
HostDisplayName string
RequestMethod string
RequestAcceptHeader string
RequestContentType string
RequestContentLength int
RequestHeadersHost string
RequestHeadersUserAgent string
RequestHeadersReferer string
ResponseHeadersContentType string
ResponseHeadersContentLength int
ResponseCode string
}
type agentAttributeDests struct {
HostDisplayName destinationSet
RequestMethod destinationSet
RequestAcceptHeader destinationSet
RequestContentType destinationSet
RequestContentLength destinationSet
RequestHeadersHost destinationSet
RequestHeadersUserAgent destinationSet
RequestHeadersReferer destinationSet
ResponseHeadersContentType destinationSet
ResponseHeadersContentLength destinationSet
ResponseCode destinationSet
}
func calculateAgentAttributeDests(c *AttributeConfig) agentAttributeDests {
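	// By default agent attributes go to every destination except the
	// browser; the User-Agent and Referer headers are further limited
	// to transaction traces and errors.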
usual := DestAll &^ destBrowser
traces := destTxnTrace | destError
return agentAttributeDests{
HostDisplayName: applyAttributeConfig(c, hostDisplayName, usual),
RequestMethod: applyAttributeConfig(c, requestMethod, usual),
RequestAcceptHeader: applyAttributeConfig(c, requestAccept, usual),
RequestContentType: applyAttributeConfig(c, requestContentType, usual),
RequestContentLength: applyAttributeConfig(c, requestContentLength, usual),
RequestHeadersHost: applyAttributeConfig(c, requestHost, usual),
RequestHeadersUserAgent: applyAttributeConfig(c, requestUserAgent, traces),
RequestHeadersReferer: applyAttributeConfig(c, requestReferer, traces),
ResponseHeadersContentType: applyAttributeConfig(c, responseContentType, usual),
ResponseHeadersContentLength: applyAttributeConfig(c, responseContentLength, usual),
ResponseCode: applyAttributeConfig(c, responseCode, usual),
}
}
type agentAttributeWriter struct {
jsonFieldsWriter
d destinationSet
}
func (w *agentAttributeWriter) writeString(name string, val string, d destinationSet) {
if "" != val && 0 != w.d&d {
w.stringField(name, truncateStringValueIfLong(val))
}
}
func (w *agentAttributeWriter) writeInt(name string, val int, d destinationSet) {
if val >= 0 && 0 != w.d&d {
w.intField(name, int64(val))
}
}
func writeAgentAttributes(buf *bytes.Buffer, d destinationSet, values agentAttributes, dests agentAttributeDests) {
w := &agentAttributeWriter{
jsonFieldsWriter: jsonFieldsWriter{buf: buf},
d: d,
}
buf.WriteByte('{')
w.writeString(hostDisplayName, values.HostDisplayName, dests.HostDisplayName)
w.writeString(requestMethod, values.RequestMethod, dests.RequestMethod)
w.writeString(requestAccept, values.RequestAcceptHeader, dests.RequestAcceptHeader)
w.writeString(requestContentType, values.RequestContentType, dests.RequestContentType)
w.writeInt(requestContentLength, values.RequestContentLength, dests.RequestContentLength)
w.writeString(requestHost, values.RequestHeadersHost, dests.RequestHeadersHost)
w.writeString(requestUserAgent, values.RequestHeadersUserAgent, dests.RequestHeadersUserAgent)
w.writeString(requestReferer, values.RequestHeadersReferer, dests.RequestHeadersReferer)
w.writeString(responseContentType, values.ResponseHeadersContentType, dests.ResponseHeadersContentType)
w.writeInt(responseContentLength, values.ResponseHeadersContentLength, dests.ResponseHeadersContentLength)
w.writeString(responseCode, values.ResponseCode, dests.ResponseCode)
buf.WriteByte('}')
}
// NewAttributes creates a new Attributes.
func NewAttributes(config *AttributeConfig) *Attributes {
return &Attributes{
config: config,
Agent: agentAttributes{
RequestContentLength: -1,
ResponseHeadersContentLength: -1,
},
}
}
// ErrInvalidAttribute is returned when the value is not valid.
type ErrInvalidAttribute struct{ typeString string }
func (e ErrInvalidAttribute) Error() string {
return fmt.Sprintf("attribute value type %s is invalid", e.typeString)
}
func valueIsValid(val interface{}) error {
switch val.(type) {
case string, bool, nil,
uint8, uint16, uint32, uint64, int8, int16, int32, int64,
float32, float64, uint, int, uintptr:
return nil
default:
return ErrInvalidAttribute{
typeString: fmt.Sprintf("%T", val),
}
}
}
type invalidAttributeKeyErr struct{ key string }
func (e invalidAttributeKeyErr) Error() string {
return fmt.Sprintf("attribute key '%.32s...' exceeds length limit %d",
e.key, attributeKeyLengthLimit)
}
type userAttributeLimitErr struct{ key string }
func (e userAttributeLimitErr) Error() string {
return fmt.Sprintf("attribute '%s' discarded: limit of %d reached", e.key,
attributeUserLimit)
}
func validAttributeKey(key string) error {
	// Attributes whose keys are excessively long are dropped rather
	// than truncated: otherwise configuration would have to be applied
	// to truncated keys, or truncation deferred until after
	// configuration.
if len(key) > attributeKeyLengthLimit {
return invalidAttributeKeyErr{key: key}
}
return nil
}
func truncateStringValueIfLong(val string) string {
if len(val) > attributeValueLengthLimit {
return StringLengthByteLimit(val, attributeValueLengthLimit)
}
return val
}
func truncateStringValueIfLongInterface(val interface{}) interface{} {
if str, ok := val.(string); ok {
val = interface{}(truncateStringValueIfLong(str))
}
return val
}
// AddUserAttribute adds a user attribute.
func AddUserAttribute(a *Attributes, key string, val interface{}, d destinationSet) error {
val = truncateStringValueIfLongInterface(val)
if err := valueIsValid(val); nil != err {
return err
}
if err := validAttributeKey(key); nil != err {
return err
}
dests := applyAttributeConfig(a.config, key, d)
if destNone == dests {
return nil
}
if nil == a.user {
a.user = make(map[string]userAttribute)
}
if _, exists := a.user[key]; !exists && len(a.user) >= attributeUserLimit {
return userAttributeLimitErr{key}
}
// Note: Duplicates are overridden: last attribute in wins.
a.user[key] = userAttribute{
value: val,
dests: dests,
}
return nil
}
func writeAttributeValueJSON(w *jsonFieldsWriter, key string, val interface{}) {
switch v := val.(type) {
case nil:
w.rawField(key, `null`)
case string:
w.stringField(key, v)
case bool:
if v {
w.rawField(key, `true`)
} else {
w.rawField(key, `false`)
}
case uint8:
w.intField(key, int64(v))
case uint16:
w.intField(key, int64(v))
case uint32:
w.intField(key, int64(v))
case uint64:
w.intField(key, int64(v))
case uint:
w.intField(key, int64(v))
case uintptr:
w.intField(key, int64(v))
case int8:
w.intField(key, int64(v))
case int16:
w.intField(key, int64(v))
case int32:
w.intField(key, int64(v))
case int64:
w.intField(key, v)
case int:
w.intField(key, int64(v))
case float32:
w.floatField(key, float64(v))
case float64:
w.floatField(key, v)
default:
w.stringField(key, fmt.Sprintf("%T", v))
}
}
type agentAttributesJSONWriter struct {
attributes *Attributes
dest destinationSet
}
func (w agentAttributesJSONWriter) WriteJSON(buf *bytes.Buffer) {
if nil == w.attributes {
buf.WriteString("{}")
return
}
writeAgentAttributes(buf, w.dest, w.attributes.Agent, w.attributes.config.agentDests)
}
func agentAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) {
agentAttributesJSONWriter{
attributes: a,
dest: d,
}.WriteJSON(buf)
}
type userAttributesJSONWriter struct {
attributes *Attributes
dest destinationSet
}
func (u userAttributesJSONWriter) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('{')
if nil != u.attributes {
w := jsonFieldsWriter{buf: buf}
for name, atr := range u.attributes.user {
if 0 != atr.dests&u.dest {
writeAttributeValueJSON(&w, name, atr.value)
}
}
}
buf.WriteByte('}')
}
func userAttributesJSON(a *Attributes, buf *bytes.Buffer, d destinationSet) {
userAttributesJSONWriter{
attributes: a,
dest: d,
}.WriteJSON(buf)
}
func userAttributesStringJSON(a *Attributes, d destinationSet) JSONString {
if nil == a {
return JSONString("{}")
}
estimate := len(a.user) * 128
buf := bytes.NewBuffer(make([]byte, 0, estimate))
userAttributesJSON(a, buf, d)
bs := buf.Bytes()
return JSONString(bs)
}
func agentAttributesStringJSON(a *Attributes, d destinationSet) JSONString {
if nil == a {
return JSONString("{}")
}
estimate := 1024
buf := bytes.NewBuffer(make([]byte, 0, estimate))
agentAttributesJSON(a, buf, d)
return JSONString(buf.Bytes())
}
func getUserAttributes(a *Attributes, d destinationSet) map[string]interface{} {
v := make(map[string]interface{})
json.Unmarshal([]byte(userAttributesStringJSON(a, d)), &v)
return v
}
func getAgentAttributes(a *Attributes, d destinationSet) map[string]interface{} {
v := make(map[string]interface{})
json.Unmarshal([]byte(agentAttributesStringJSON(a, d)), &v)
return v
}
// RequestAgentAttributes gathers agent attributes out of the request.
func RequestAgentAttributes(a *Attributes, r *http.Request) {
a.Agent.RequestMethod = r.Method
h := r.Header
if nil == h {
return
}
a.Agent.RequestAcceptHeader = h.Get("Accept")
a.Agent.RequestContentType = h.Get("Content-Type")
a.Agent.RequestHeadersHost = h.Get("Host")
a.Agent.RequestHeadersUserAgent = h.Get("User-Agent")
a.Agent.RequestHeadersReferer = SafeURLFromString(h.Get("Referer"))
if cl := h.Get("Content-Length"); "" != cl {
if x, err := strconv.Atoi(cl); nil == err {
a.Agent.RequestContentLength = x
}
}
}
// ResponseHeaderAttributes gather agent attributes from the response headers.
func ResponseHeaderAttributes(a *Attributes, h http.Header) {
if nil == h {
return
}
a.Agent.ResponseHeadersContentType = h.Get("Content-Type")
if val := h.Get("Content-Length"); "" != val {
if x, err := strconv.Atoi(val); nil == err {
a.Agent.ResponseHeadersContentLength = x
}
}
}
var (
// statusCodeLookup avoids a strconv.Itoa call.
statusCodeLookup = map[int]string{
100: "100", 101: "101",
200: "200", 201: "201", 202: "202", 203: "203", 204: "204", 205: "205", 206: "206",
300: "300", 301: "301", 302: "302", 303: "303", 304: "304", 305: "305", 307: "307",
400: "400", 401: "401", 402: "402", 403: "403", 404: "404", 405: "405", 406: "406",
407: "407", 408: "408", 409: "409", 410: "410", 411: "411", 412: "412", 413: "413",
414: "414", 415: "415", 416: "416", 417: "417", 418: "418", 428: "428", 429: "429",
431: "431", 451: "451",
500: "500", 501: "501", 502: "502", 503: "503", 504: "504", 505: "505", 511: "511",
}
)
// ResponseCodeAttribute sets the response code agent attribute.
func ResponseCodeAttribute(a *Attributes, code int) {
a.Agent.ResponseCode = statusCodeLookup[code]
if a.Agent.ResponseCode == "" {
a.Agent.ResponseCode = strconv.Itoa(code)
}
}
@ -0,0 +1,267 @@
package internal
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
"net/url"
"github.com/newrelic/go-agent/internal/logger"
)
const (
	protocolVersion = "14"
userAgentPrefix = "NewRelic-Go-Agent/"
// Methods used in collector communication.
cmdRedirect = "get_redirect_host"
cmdConnect = "connect"
cmdMetrics = "metric_data"
cmdCustomEvents = "custom_event_data"
cmdTxnEvents = "analytic_event_data"
cmdErrorEvents = "error_event_data"
cmdErrorData = "error_data"
cmdTxnTraces = "transaction_sample_data"
cmdSlowSQLs = "sql_trace_data"
)
var (
// ErrPayloadTooLarge is created in response to receiving a 413 response
// code.
ErrPayloadTooLarge = errors.New("payload too large")
// ErrUnsupportedMedia is created in response to receiving a 415
// response code.
ErrUnsupportedMedia = errors.New("unsupported media")
)
// RpmCmd contains fields specific to an individual call made to RPM.
type RpmCmd struct {
Name string
Collector string
RunID string
Data []byte
}
// RpmControls contains fields which will be the same for all calls made
// by the same application.
type RpmControls struct {
UseTLS bool
License string
Client *http.Client
Logger logger.Logger
AgentVersion string
}
func rpmURL(cmd RpmCmd, cs RpmControls) string {
var u url.URL
u.Host = cmd.Collector
u.Path = "agent_listener/invoke_raw_method"
if cs.UseTLS {
u.Scheme = "https"
} else {
u.Scheme = "http"
}
query := url.Values{}
query.Set("marshal_format", "json")
	query.Set("protocol_version", protocolVersion)
query.Set("method", cmd.Name)
query.Set("license_key", cs.License)
if len(cmd.RunID) > 0 {
query.Set("run_id", cmd.RunID)
}
u.RawQuery = query.Encode()
return u.String()
}
type unexpectedStatusCodeErr struct {
code int
}
func (e unexpectedStatusCodeErr) Error() string {
return fmt.Sprintf("unexpected HTTP status code: %d", e.code)
}
func collectorRequestInternal(url string, data []byte, cs RpmControls) ([]byte, error) {
deflated, err := compress(data)
if nil != err {
return nil, err
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(deflated))
if nil != err {
return nil, err
}
req.Header.Add("Accept-Encoding", "identity, deflate")
req.Header.Add("Content-Type", "application/octet-stream")
req.Header.Add("User-Agent", userAgentPrefix+cs.AgentVersion)
req.Header.Add("Content-Encoding", "deflate")
resp, err := cs.Client.Do(req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if 413 == resp.StatusCode {
return nil, ErrPayloadTooLarge
}
if 415 == resp.StatusCode {
return nil, ErrUnsupportedMedia
}
// If the response code is not 200, then the collector may not return
// valid JSON.
if 200 != resp.StatusCode {
return nil, unexpectedStatusCodeErr{code: resp.StatusCode}
}
b, err := ioutil.ReadAll(resp.Body)
if nil != err {
return nil, err
}
return parseResponse(b)
}
// CollectorRequest makes a request to New Relic.
func CollectorRequest(cmd RpmCmd, cs RpmControls) ([]byte, error) {
url := rpmURL(cmd, cs)
if cs.Logger.DebugEnabled() {
cs.Logger.Debug("rpm request", map[string]interface{}{
"command": cmd.Name,
"url": url,
"payload": JSONString(cmd.Data),
})
}
resp, err := collectorRequestInternal(url, cmd.Data, cs)
if err != nil {
cs.Logger.Debug("rpm failure", map[string]interface{}{
"command": cmd.Name,
"url": url,
"error": err.Error(),
})
}
if cs.Logger.DebugEnabled() {
cs.Logger.Debug("rpm response", map[string]interface{}{
"command": cmd.Name,
"url": url,
"response": JSONString(resp),
})
}
return resp, err
}
type rpmException struct {
Message string `json:"message"`
ErrorType string `json:"error_type"`
}
func (e *rpmException) Error() string {
return fmt.Sprintf("%s: %s", e.ErrorType, e.Message)
}
func hasType(e error, expected string) bool {
rpmErr, ok := e.(*rpmException)
if !ok {
return false
}
return rpmErr.ErrorType == expected
}
const (
forceRestartType = "NewRelic::Agent::ForceRestartException"
disconnectType = "NewRelic::Agent::ForceDisconnectException"
licenseInvalidType = "NewRelic::Agent::LicenseException"
runtimeType = "RuntimeError"
)
// IsRestartException indicates if the error was a restart exception.
func IsRestartException(e error) bool { return hasType(e, forceRestartType) }
// IsLicenseException indicates if the error was a license exception,
// such as an invalid license key.
func IsLicenseException(e error) bool { return hasType(e, licenseInvalidType) }
// IsRuntime indicates if the error was a runtime exception.
func IsRuntime(e error) bool { return hasType(e, runtimeType) }
// IsDisconnect indicates if the error was a disconnect exception.
func IsDisconnect(e error) bool { return hasType(e, disconnectType) }
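// parseResponse unwraps the collector's JSON envelope: it returns the
// "return_value" payload, or an error if an "exception" is present.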
func parseResponse(b []byte) ([]byte, error) {
var r struct {
ReturnValue json.RawMessage `json:"return_value"`
Exception *rpmException `json:"exception"`
}
err := json.Unmarshal(b, &r)
if nil != err {
return nil, err
}
if nil != r.Exception {
return nil, r.Exception
}
return r.ReturnValue, nil
}
// ConnectAttempt tries to connect an application.
func ConnectAttempt(js []byte, redirectHost string, cs RpmControls) (*AppRun, error) {
call := RpmCmd{
Name: cmdRedirect,
Collector: redirectHost,
Data: []byte("[]"),
}
out, err := CollectorRequest(call, cs)
if nil != err {
// err is intentionally unmodified: We do not want to change
// the type of these collector errors.
return nil, err
}
var host string
err = json.Unmarshal(out, &host)
if nil != err {
return nil, fmt.Errorf("unable to parse redirect reply: %v", err)
}
call.Collector = host
call.Data = js
call.Name = cmdConnect
rawReply, err := CollectorRequest(call, cs)
if nil != err {
// err is intentionally unmodified: We do not want to change
// the type of these collector errors.
return nil, err
}
reply := ConnectReplyDefaults()
err = json.Unmarshal(rawReply, reply)
if nil != err {
return nil, fmt.Errorf("unable to parse connect reply: %v", err)
}
// Note: This should never happen. It would mean the collector
// response is malformed. This exists merely as extra defensiveness.
if "" == reply.RunID {
return nil, errors.New("connect reply missing agent run id")
}
return &AppRun{reply, host}, nil
}
@ -0,0 +1,50 @@
package internal
import (
"bytes"
"compress/zlib"
"encoding/base64"
"io/ioutil"
)
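// compress zlib-deflates the given bytes. The collector client sends
// payloads compressed this way with a "deflate" Content-Encoding.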
func compress(b []byte) ([]byte, error) {
buf := bytes.Buffer{}
w := zlib.NewWriter(&buf)
	_, err := w.Write(b)
	if closeErr := w.Close(); nil == err {
		err = closeErr
	}
	if nil != err {
		return nil, err
	}
return buf.Bytes(), nil
}
func uncompress(b []byte) ([]byte, error) {
buf := bytes.NewBuffer(b)
r, err := zlib.NewReader(buf)
if nil != err {
return nil, err
}
defer r.Close()
return ioutil.ReadAll(r)
}
func compressEncode(b []byte) (string, error) {
compressed, err := compress(b)
if nil != err {
return "", err
}
return base64.StdEncoding.EncodeToString(compressed), nil
}
func uncompressDecode(s string) ([]byte, error) {
decoded, err := base64.StdEncoding.DecodeString(s)
if nil != err {
return nil, err
}
return uncompress(decoded)
}
@ -0,0 +1,114 @@
package internal
import (
"strings"
"time"
)
// AgentRunID identifies the current connection with the collector.
type AgentRunID string
func (id AgentRunID) String() string {
return string(id)
}
// AppRun contains information regarding a single connection session with the
// collector. It is created upon application connect and is afterwards
// immutable.
type AppRun struct {
*ConnectReply
Collector string
}
// ConnectReply contains all of the settings and state sent down from the
// collector. It should not be modified after creation.
type ConnectReply struct {
RunID AgentRunID `json:"agent_run_id"`
// Transaction Name Modifiers
SegmentTerms segmentRules `json:"transaction_segment_terms"`
TxnNameRules metricRules `json:"transaction_name_rules"`
URLRules metricRules `json:"url_rules"`
MetricRules metricRules `json:"metric_name_rules"`
// Cross Process
EncodingKey string `json:"encoding_key"`
CrossProcessID string `json:"cross_process_id"`
TrustedAccounts []int `json:"trusted_account_ids"`
// Settings
KeyTxnApdex map[string]float64 `json:"web_transactions_apdex"`
ApdexThresholdSeconds float64 `json:"apdex_t"`
CollectAnalyticsEvents bool `json:"collect_analytics_events"`
CollectCustomEvents bool `json:"collect_custom_events"`
CollectTraces bool `json:"collect_traces"`
CollectErrors bool `json:"collect_errors"`
CollectErrorEvents bool `json:"collect_error_events"`
// RUM
AgentLoader string `json:"js_agent_loader"`
Beacon string `json:"beacon"`
BrowserKey string `json:"browser_key"`
AppID string `json:"application_id"`
ErrorBeacon string `json:"error_beacon"`
JSAgentFile string `json:"js_agent_file"`
Messages []struct {
Message string `json:"message"`
Level string `json:"level"`
} `json:"messages"`
}
// ConnectReplyDefaults returns a newly allocated ConnectReply with the proper
// default settings. A pointer to a global is not used to prevent consumers
// from changing the default settings.
func ConnectReplyDefaults() *ConnectReply {
return &ConnectReply{
ApdexThresholdSeconds: 0.5,
CollectAnalyticsEvents: true,
CollectCustomEvents: true,
CollectTraces: true,
CollectErrors: true,
CollectErrorEvents: true,
}
}
// CalculateApdexThreshold calculates the apdex threshold.
func CalculateApdexThreshold(c *ConnectReply, txnName string) time.Duration {
if t, ok := c.KeyTxnApdex[txnName]; ok {
return floatSecondsToDuration(t)
}
return floatSecondsToDuration(c.ApdexThresholdSeconds)
}
// CreateFullTxnName uses collector rules and the appropriate metric prefix to
// construct the full transaction metric name from the name given by the
// consumer.
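//
// For example, a web transaction named "/users" becomes
// webMetricPrefix + "/users" before the name and segment rules are
// applied (assuming the URL rules leave the input unchanged).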
func CreateFullTxnName(input string, reply *ConnectReply, isWeb bool) string {
var afterURLRules string
if "" != input {
afterURLRules = reply.URLRules.Apply(input)
if "" == afterURLRules {
return ""
}
}
prefix := backgroundMetricPrefix
if isWeb {
prefix = webMetricPrefix
}
var beforeNameRules string
if strings.HasPrefix(afterURLRules, "/") {
beforeNameRules = prefix + afterURLRules
} else {
beforeNameRules = prefix + "/" + afterURLRules
}
afterNameRules := reply.TxnNameRules.Apply(beforeNameRules)
if "" == afterNameRules {
return ""
}
return reply.SegmentTerms.apply(afterNameRules)
}
@ -0,0 +1,108 @@
package internal
import (
"bytes"
"fmt"
"regexp"
"time"
)
// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents
var (
eventTypeRegexRaw = `^[a-zA-Z0-9:_ ]+$`
eventTypeRegex = regexp.MustCompile(eventTypeRegexRaw)
errEventTypeLength = fmt.Errorf("event type exceeds length limit of %d",
attributeKeyLengthLimit)
// ErrEventTypeRegex will be returned to caller of app.RecordCustomEvent
// if the event type is not valid.
ErrEventTypeRegex = fmt.Errorf("event type must match %s", eventTypeRegexRaw)
errNumAttributes = fmt.Errorf("maximum of %d attributes exceeded",
customEventAttributeLimit)
)
// CustomEvent is a custom event.
type CustomEvent struct {
eventType string
timestamp time.Time
truncatedParams map[string]interface{}
}
// WriteJSON prepares JSON in the format expected by the collector.
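// The payload is a three-element array: intrinsics (the event type and
// timestamp), the event's parameters, and an empty agent attributes
// hash.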
func (e *CustomEvent) WriteJSON(buf *bytes.Buffer) {
w := jsonFieldsWriter{buf: buf}
buf.WriteByte('[')
buf.WriteByte('{')
w.stringField("type", e.eventType)
w.floatField("timestamp", timeToFloatSeconds(e.timestamp))
buf.WriteByte('}')
buf.WriteByte(',')
buf.WriteByte('{')
w = jsonFieldsWriter{buf: buf}
for key, val := range e.truncatedParams {
writeAttributeValueJSON(&w, key, val)
}
buf.WriteByte('}')
buf.WriteByte(',')
buf.WriteByte('{')
buf.WriteByte('}')
buf.WriteByte(']')
}
// MarshalJSON is used for testing.
func (e *CustomEvent) MarshalJSON() ([]byte, error) {
buf := bytes.NewBuffer(make([]byte, 0, 256))
e.WriteJSON(buf)
return buf.Bytes(), nil
}
func eventTypeValidate(eventType string) error {
if len(eventType) > attributeKeyLengthLimit {
return errEventTypeLength
}
if !eventTypeRegex.MatchString(eventType) {
return ErrEventTypeRegex
}
return nil
}
// CreateCustomEvent creates a custom event.
func CreateCustomEvent(eventType string, params map[string]interface{}, now time.Time) (*CustomEvent, error) {
if err := eventTypeValidate(eventType); nil != err {
return nil, err
}
if len(params) > customEventAttributeLimit {
return nil, errNumAttributes
}
truncatedParams := make(map[string]interface{})
for key, val := range params {
if err := validAttributeKey(key); nil != err {
return nil, err
}
val = truncateStringValueIfLongInterface(val)
if err := valueIsValid(val); nil != err {
return nil, err
}
truncatedParams[key] = val
}
return &CustomEvent{
eventType: eventType,
timestamp: now,
truncatedParams: truncatedParams,
}, nil
}
// MergeIntoHarvest implements Harvestable.
func (e *CustomEvent) MergeIntoHarvest(h *Harvest) {
h.CustomEvents.Add(e)
}
@ -0,0 +1,32 @@
package internal
import (
"math/rand"
"time"
)
type customEvents struct {
events *analyticsEvents
}
func newCustomEvents(max int) *customEvents {
return &customEvents{
events: newAnalyticsEvents(max),
}
}
func (cs *customEvents) Add(e *CustomEvent) {
stamp := eventStamp(rand.Float32())
cs.events.addEvent(analyticsEvent{stamp, e})
}
func (cs *customEvents) MergeIntoHarvest(h *Harvest) {
h.CustomEvents.events.mergeFailed(cs.events)
}
func (cs *customEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
return cs.events.CollectorJSON(agentRunID)
}
func (cs *customEvents) numSeen() float64 { return cs.events.NumSeen() }
func (cs *customEvents) numSaved() float64 { return cs.events.NumSaved() }


@ -0,0 +1,61 @@
package internal
import (
"encoding/json"
"reflect"
"runtime"
)
// Environment describes the application's environment.
type Environment struct {
Compiler string `env:"runtime.Compiler"`
GOARCH string `env:"runtime.GOARCH"`
GOOS string `env:"runtime.GOOS"`
Version string `env:"runtime.Version"`
NumCPU int `env:"runtime.NumCPU"`
}
var (
// SampleEnvironment is useful for testing.
SampleEnvironment = Environment{
Compiler: "comp",
GOARCH: "arch",
GOOS: "goos",
Version: "vers",
NumCPU: 8,
}
)
// NewEnvironment returns a new Environment.
func NewEnvironment() Environment {
return Environment{
Compiler: runtime.Compiler,
GOARCH: runtime.GOARCH,
GOOS: runtime.GOOS,
Version: runtime.Version(),
NumCPU: runtime.NumCPU(),
}
}
// MarshalJSON prepares Environment JSON in the format expected by the collector
// during the connect command.
func (e Environment) MarshalJSON() ([]byte, error) {
var arr [][]interface{}
val := reflect.ValueOf(e)
numFields := val.NumField()
arr = make([][]interface{}, numFields)
for i := 0; i < numFields; i++ {
v := val.Field(i)
t := val.Type().Field(i).Tag.Get("env")
arr[i] = []interface{}{
t,
v.Interface(),
}
}
return json.Marshal(arr)
}
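
For illustration, marshalling SampleEnvironment above yields the array-of-pairs shape the collector expects (output reconstructed from the code):

js, _ := json.Marshal(SampleEnvironment)
// js:
// [["runtime.Compiler","comp"],["runtime.GOARCH","arch"],
//  ["runtime.GOOS","goos"],["runtime.Version","vers"],
//  ["runtime.NumCPU",8]]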


@ -0,0 +1,87 @@
package internal
import (
"bytes"
"math/rand"
"time"
)
// ErrorEvent is an error event.
type ErrorEvent struct {
Klass string
Msg string
When time.Time
TxnName string
Duration time.Duration
Queuing time.Duration
Attrs *Attributes
DatastoreExternalTotals
}
// MarshalJSON is used for testing.
func (e *ErrorEvent) MarshalJSON() ([]byte, error) {
buf := bytes.NewBuffer(make([]byte, 0, 256))
e.WriteJSON(buf)
return buf.Bytes(), nil
}
// WriteJSON prepares JSON in the format expected by the collector.
// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md
func (e *ErrorEvent) WriteJSON(buf *bytes.Buffer) {
w := jsonFieldsWriter{buf: buf}
buf.WriteByte('[')
buf.WriteByte('{')
w.stringField("type", "TransactionError")
w.stringField("error.class", e.Klass)
w.stringField("error.message", e.Msg)
w.floatField("timestamp", timeToFloatSeconds(e.When))
w.stringField("transactionName", e.TxnName)
w.floatField("duration", e.Duration.Seconds())
if e.Queuing > 0 {
w.floatField("queueDuration", e.Queuing.Seconds())
}
if e.externalCallCount > 0 {
w.intField("externalCallCount", int64(e.externalCallCount))
w.floatField("externalDuration", e.externalDuration.Seconds())
}
if e.datastoreCallCount > 0 {
// Note that "database" is used for the keys here instead of
// "datastore" for historical reasons.
w.intField("databaseCallCount", int64(e.datastoreCallCount))
w.floatField("databaseDuration", e.datastoreDuration.Seconds())
}
buf.WriteByte('}')
buf.WriteByte(',')
userAttributesJSON(e.Attrs, buf, destError)
buf.WriteByte(',')
agentAttributesJSON(e.Attrs, buf, destError)
buf.WriteByte(']')
}
type errorEvents struct {
events *analyticsEvents
}
func newErrorEvents(max int) *errorEvents {
return &errorEvents{
events: newAnalyticsEvents(max),
}
}
func (events *errorEvents) Add(e *ErrorEvent) {
stamp := eventStamp(rand.Float32())
events.events.addEvent(analyticsEvent{stamp, e})
}
func (events *errorEvents) MergeIntoHarvest(h *Harvest) {
h.ErrorEvents.events.mergeFailed(events.events)
}
func (events *errorEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
return events.events.CollectorJSON(agentRunID)
}
func (events *errorEvents) numSeen() float64 { return events.events.NumSeen() }
func (events *errorEvents) numSaved() float64 { return events.events.NumSaved() }

vendor/github.com/newrelic/go-agent/internal/errors.go

@ -0,0 +1,179 @@
package internal
import (
"bytes"
"fmt"
"net/http"
"reflect"
"strconv"
"time"
"github.com/newrelic/go-agent/internal/jsonx"
)
const (
// PanicErrorKlass is the error klass used for errors generated by
// recovering panics in txn.End.
PanicErrorKlass = "panic"
)
func panicValueMsg(v interface{}) string {
switch val := v.(type) {
case error:
return val.Error()
default:
return fmt.Sprintf("%v", v)
}
}
// TxnErrorFromPanic creates a new TxnError from a panic.
func TxnErrorFromPanic(now time.Time, v interface{}) TxnError {
return TxnError{
When: now,
Msg: panicValueMsg(v),
Klass: PanicErrorKlass,
}
}
// TxnErrorFromError creates a new TxnError from an error.
func TxnErrorFromError(now time.Time, err error) TxnError {
return TxnError{
When: now,
Msg: err.Error(),
Klass: reflect.TypeOf(err).String(),
}
}
// TxnErrorFromResponseCode creates a new TxnError from an http response code.
func TxnErrorFromResponseCode(now time.Time, code int) TxnError {
return TxnError{
When: now,
Msg: http.StatusText(code),
Klass: strconv.Itoa(code),
}
}
// TxnError is an error captured in a Transaction.
type TxnError struct {
When time.Time
Stack *StackTrace
Msg string
Klass string
}
// TxnErrors is a set of errors captured in a Transaction.
type TxnErrors []*TxnError
// NewTxnErrors returns a new empty TxnErrors.
func NewTxnErrors(max int) TxnErrors {
return make([]*TxnError, 0, max)
}
// Add adds a TxnError.
func (errors *TxnErrors) Add(e TxnError) {
if len(*errors) < cap(*errors) {
*errors = append(*errors, &e)
}
}
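// For illustration (same-package, values hypothetical): errors beyond the
// slice's capacity are silently dropped.
//
//	errs := NewTxnErrors(2)
//	for i := 0; i < 5; i++ {
//		errs.Add(TxnError{Msg: fmt.Sprintf("e%d", i)})
//	}
//	len(errs) == 2 // only the first two errors are kept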
func (h *harvestError) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('[')
jsonx.AppendFloat(buf, timeToFloatMilliseconds(h.When))
buf.WriteByte(',')
jsonx.AppendString(buf, h.txnName)
buf.WriteByte(',')
jsonx.AppendString(buf, h.Msg)
buf.WriteByte(',')
jsonx.AppendString(buf, h.Klass)
buf.WriteByte(',')
buf.WriteByte('{')
w := jsonFieldsWriter{buf: buf}
if nil != h.Stack {
w.writerField("stack_trace", h.Stack)
}
w.writerField("agentAttributes", agentAttributesJSONWriter{
attributes: h.attrs,
dest: destError,
})
w.writerField("userAttributes", userAttributesJSONWriter{
attributes: h.attrs,
dest: destError,
})
w.rawField("intrinsics", JSONString("{}"))
if h.requestURI != "" {
w.stringField("request_uri", h.requestURI)
}
buf.WriteByte('}')
buf.WriteByte(']')
}
// MarshalJSON is used for testing.
func (h *harvestError) MarshalJSON() ([]byte, error) {
buf := &bytes.Buffer{}
h.WriteJSON(buf)
return buf.Bytes(), nil
}
type harvestError struct {
TxnError
txnName string
requestURI string
attrs *Attributes
}
type harvestErrors struct {
errors []*harvestError
}
func newHarvestErrors(max int) *harvestErrors {
return &harvestErrors{
errors: make([]*harvestError, 0, max),
}
}
func harvestErrorFromTxnError(e *TxnError, txnName string, requestURI string, attrs *Attributes) *harvestError {
return &harvestError{
TxnError: *e,
txnName: txnName,
requestURI: requestURI,
attrs: attrs,
}
}
func addTxnError(errors *harvestErrors, e *TxnError, txnName string, requestURI string, attrs *Attributes) {
he := harvestErrorFromTxnError(e, txnName, requestURI, attrs)
errors.errors = append(errors.errors, he)
}
// MergeTxnErrors merges a transaction's errors into the harvest's errors.
func MergeTxnErrors(errors *harvestErrors, errs TxnErrors, txnName string, requestURI string, attrs *Attributes) {
for _, e := range errs {
if len(errors.errors) == cap(errors.errors) {
return
}
addTxnError(errors, e, txnName, requestURI, attrs)
}
}
func (errors *harvestErrors) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
if 0 == len(errors.errors) {
return nil, nil
}
estimate := 1024 * len(errors.errors)
buf := bytes.NewBuffer(make([]byte, 0, estimate))
buf.WriteByte('[')
jsonx.AppendString(buf, agentRunID)
buf.WriteByte(',')
buf.WriteByte('[')
for i, e := range errors.errors {
if i > 0 {
buf.WriteByte(',')
}
e.WriteJSON(buf)
}
buf.WriteByte(']')
buf.WriteByte(']')
return buf.Bytes(), nil
}
func (errors *harvestErrors) MergeIntoHarvest(h *Harvest) {}

vendor/github.com/newrelic/go-agent/internal/expect.go

@ -0,0 +1,402 @@
package internal
import (
"fmt"
"runtime"
"time"
)
var (
// Unfortunately, the resolution of time.Now() on Windows is coarse: Two
// sequential calls to time.Now() may return the same value, and tests
// which expect non-zero durations may fail. To avoid adding sleep
// statements or mocking time.Now(), those tests are skipped on Windows.
doDurationTests = runtime.GOOS != `windows`
)
// Validator is used for testing.
type Validator interface {
Error(...interface{})
}
func validateStringField(v Validator, fieldName, v1, v2 string) {
if v1 != v2 {
v.Error(fieldName, v1, v2)
}
}
type addValidatorField struct {
field interface{}
original Validator
}
func (a addValidatorField) Error(fields ...interface{}) {
fields = append([]interface{}{a.field}, fields...)
a.original.Error(fields...)
}
// ExtendValidator is used to add more context to a validator.
func ExtendValidator(v Validator, field interface{}) Validator {
return addValidatorField{
field: field,
original: v,
}
}
// WantMetric is a metric expectation. If Data is nil, then any data values are
// acceptable.
type WantMetric struct {
Name string
Scope string
Forced interface{} // true, false, or nil
Data []float64
}
// WantCustomEvent is a custom event expectation.
type WantCustomEvent struct {
Type string
Params map[string]interface{}
}
// WantError is a traced error expectation.
type WantError struct {
TxnName string
Msg string
Klass string
Caller string
URL string
UserAttributes map[string]interface{}
AgentAttributes map[string]interface{}
}
// WantErrorEvent is an error event expectation.
type WantErrorEvent struct {
TxnName string
Msg string
Klass string
Queuing bool
ExternalCallCount uint64
DatastoreCallCount uint64
UserAttributes map[string]interface{}
AgentAttributes map[string]interface{}
}
// WantTxnEvent is a transaction event expectation.
type WantTxnEvent struct {
Name string
Zone string
Queuing bool
ExternalCallCount uint64
DatastoreCallCount uint64
UserAttributes map[string]interface{}
AgentAttributes map[string]interface{}
}
// WantTxnTrace is a transaction trace expectation.
type WantTxnTrace struct {
MetricName string
CleanURL string
NumSegments int
UserAttributes map[string]interface{}
AgentAttributes map[string]interface{}
}
// WantSlowQuery is a slowQuery expectation.
type WantSlowQuery struct {
Count int32
MetricName string
Query string
TxnName string
TxnURL string
DatabaseName string
Host string
PortPathOrID string
Params map[string]interface{}
}
// Expect exposes methods that allow for testing whether the correct data was
// captured.
type Expect interface {
ExpectCustomEvents(t Validator, want []WantCustomEvent)
ExpectErrors(t Validator, want []WantError)
ExpectErrorEvents(t Validator, want []WantErrorEvent)
ExpectTxnEvents(t Validator, want []WantTxnEvent)
ExpectMetrics(t Validator, want []WantMetric)
ExpectTxnTraces(t Validator, want []WantTxnTrace)
ExpectSlowQueries(t Validator, want []WantSlowQuery)
}
func expectMetricField(t Validator, id metricID, v1, v2 float64, fieldName string) {
if v1 != v2 {
t.Error("metric fields do not match", id, v1, v2, fieldName)
}
}
// ExpectMetrics allows testing of metrics.
func ExpectMetrics(t Validator, mt *metricTable, expect []WantMetric) {
if len(mt.metrics) != len(expect) {
t.Error("metric counts do not match expectations", len(mt.metrics), len(expect))
}
expectedIds := make(map[metricID]struct{})
for _, e := range expect {
id := metricID{Name: e.Name, Scope: e.Scope}
expectedIds[id] = struct{}{}
m := mt.metrics[id]
if nil == m {
t.Error("unable to find metric", id)
continue
}
if b, ok := e.Forced.(bool); ok {
if b != (forced == m.forced) {
t.Error("metric forced incorrect", b, m.forced, id)
}
}
if nil != e.Data {
expectMetricField(t, id, e.Data[0], m.data.countSatisfied, "countSatisfied")
expectMetricField(t, id, e.Data[1], m.data.totalTolerated, "totalTolerated")
expectMetricField(t, id, e.Data[2], m.data.exclusiveFailed, "exclusiveFailed")
expectMetricField(t, id, e.Data[3], m.data.min, "min")
expectMetricField(t, id, e.Data[4], m.data.max, "max")
expectMetricField(t, id, e.Data[5], m.data.sumSquares, "sumSquares")
}
}
for id := range mt.metrics {
if _, ok := expectedIds[id]; !ok {
t.Error("expected metrics does not contain", id.Name, id.Scope)
}
}
}
func expectAttributes(v Validator, exists map[string]interface{}, expect map[string]interface{}) {
// TODO: This params comparison could be smarter: alert on differences
// using subset/superset semantics.
if len(exists) != len(expect) {
v.Error("attributes length difference", exists, expect)
return
}
for key, val := range expect {
found, ok := exists[key]
if !ok {
v.Error("missing key", key)
continue
}
v1 := fmt.Sprint(found)
v2 := fmt.Sprint(val)
if v1 != v2 {
v.Error("value difference", fmt.Sprintf("key=%s", key),
v1, v2)
}
}
}
func expectCustomEvent(v Validator, event *CustomEvent, expect WantCustomEvent) {
if event.eventType != expect.Type {
v.Error("type mismatch", event.eventType, expect.Type)
}
now := time.Now()
diff := absTimeDiff(now, event.timestamp)
if diff > time.Hour {
v.Error("large timestamp difference", event.eventType, now, event.timestamp)
}
expectAttributes(v, event.truncatedParams, expect.Params)
}
// ExpectCustomEvents allows testing of custom events.
func ExpectCustomEvents(v Validator, cs *customEvents, expect []WantCustomEvent) {
if len(cs.events.events) != len(expect) {
v.Error("number of custom events does not match", len(cs.events.events),
len(expect))
return
}
for i, e := range expect {
event, ok := cs.events.events[i].jsonWriter.(*CustomEvent)
if !ok {
v.Error("wrong custom event")
} else {
expectCustomEvent(v, event, e)
}
}
}
func expectErrorEvent(v Validator, err *ErrorEvent, expect WantErrorEvent) {
validateStringField(v, "txnName", expect.TxnName, err.TxnName)
validateStringField(v, "klass", expect.Klass, err.Klass)
validateStringField(v, "msg", expect.Msg, err.Msg)
if (0 != err.Queuing) != expect.Queuing {
v.Error("queuing", err.Queuing)
}
if nil != expect.UserAttributes {
expectAttributes(v, getUserAttributes(err.Attrs, destError), expect.UserAttributes)
}
if nil != expect.AgentAttributes {
expectAttributes(v, getAgentAttributes(err.Attrs, destError), expect.AgentAttributes)
}
if expect.ExternalCallCount != err.externalCallCount {
v.Error("external call count", expect.ExternalCallCount, err.externalCallCount)
}
if doDurationTests && (0 == expect.ExternalCallCount) != (err.externalDuration == 0) {
v.Error("external duration", err.externalDuration)
}
if expect.DatastoreCallCount != err.datastoreCallCount {
v.Error("datastore call count", expect.DatastoreCallCount, err.datastoreCallCount)
}
if doDurationTests && (0 == expect.DatastoreCallCount) != (err.datastoreDuration == 0) {
v.Error("datastore duration", err.datastoreDuration)
}
}
// ExpectErrorEvents allows testing of error events.
func ExpectErrorEvents(v Validator, events *errorEvents, expect []WantErrorEvent) {
if len(events.events.events) != len(expect) {
v.Error("number of custom events does not match",
len(events.events.events), len(expect))
return
}
for i, e := range expect {
event, ok := events.events.events[i].jsonWriter.(*ErrorEvent)
if !ok {
v.Error("wrong error event")
} else {
expectErrorEvent(v, event, e)
}
}
}
func expectTxnEvent(v Validator, e *TxnEvent, expect WantTxnEvent) {
validateStringField(v, "apdex zone", expect.Zone, e.Zone.label())
validateStringField(v, "name", expect.Name, e.Name)
if doDurationTests && 0 == e.Duration {
v.Error("zero duration", e.Duration)
}
if (0 != e.Queuing) != expect.Queuing {
v.Error("queuing", e.Queuing)
}
if nil != expect.UserAttributes {
expectAttributes(v, getUserAttributes(e.Attrs, destTxnEvent), expect.UserAttributes)
}
if nil != expect.AgentAttributes {
expectAttributes(v, getAgentAttributes(e.Attrs, destTxnEvent), expect.AgentAttributes)
}
if expect.ExternalCallCount != e.externalCallCount {
v.Error("external call count", expect.ExternalCallCount, e.externalCallCount)
}
if doDurationTests && (0 == expect.ExternalCallCount) != (e.externalDuration == 0) {
v.Error("external duration", e.externalDuration)
}
if expect.DatastoreCallCount != e.datastoreCallCount {
v.Error("datastore call count", expect.DatastoreCallCount, e.datastoreCallCount)
}
if doDurationTests && (0 == expect.DatastoreCallCount) != (e.datastoreDuration == 0) {
v.Error("datastore duration", e.datastoreDuration)
}
}
// ExpectTxnEvents allows testing of txn events.
func ExpectTxnEvents(v Validator, events *txnEvents, expect []WantTxnEvent) {
if len(events.events.events) != len(expect) {
v.Error("number of txn events does not match",
len(events.events.events), len(expect))
return
}
for i, e := range expect {
event, ok := events.events.events[i].jsonWriter.(*TxnEvent)
if !ok {
v.Error("wrong txn event")
} else {
expectTxnEvent(v, event, e)
}
}
}
func expectError(v Validator, err *harvestError, expect WantError) {
caller := topCallerNameBase(err.TxnError.Stack)
validateStringField(v, "caller", expect.Caller, caller)
validateStringField(v, "txnName", expect.TxnName, err.txnName)
validateStringField(v, "klass", expect.Klass, err.TxnError.Klass)
validateStringField(v, "msg", expect.Msg, err.TxnError.Msg)
validateStringField(v, "URL", expect.URL, err.requestURI)
if nil != expect.UserAttributes {
expectAttributes(v, getUserAttributes(err.attrs, destError), expect.UserAttributes)
}
if nil != expect.AgentAttributes {
expectAttributes(v, getAgentAttributes(err.attrs, destError), expect.AgentAttributes)
}
}
// ExpectErrors allows testing of errors.
func ExpectErrors(v Validator, errors *harvestErrors, expect []WantError) {
if len(errors.errors) != len(expect) {
v.Error("number of errors mismatch", len(errors.errors), len(expect))
return
}
for i, e := range expect {
expectError(v, errors.errors[i], e)
}
}
func expectTxnTrace(v Validator, trace *HarvestTrace, expect WantTxnTrace) {
if doDurationTests && 0 == trace.Duration {
v.Error("zero trace duration")
}
validateStringField(v, "metric name", expect.MetricName, trace.MetricName)
validateStringField(v, "request url", expect.CleanURL, trace.CleanURL)
if nil != expect.UserAttributes {
expectAttributes(v, getUserAttributes(trace.Attrs, destTxnTrace), expect.UserAttributes)
}
if nil != expect.AgentAttributes {
expectAttributes(v, getAgentAttributes(trace.Attrs, destTxnTrace), expect.AgentAttributes)
}
if expect.NumSegments != len(trace.Trace.nodes) {
v.Error("wrong number of segments", expect.NumSegments, len(trace.Trace.nodes))
}
}
// ExpectTxnTraces allows testing of transaction traces.
func ExpectTxnTraces(v Validator, traces *harvestTraces, want []WantTxnTrace) {
if len(want) == 0 {
if nil != traces.trace {
v.Error("trace exists when not expected")
}
} else if len(want) > 1 {
v.Error("too many traces expected")
} else {
if nil == traces.trace {
v.Error("missing expected trace")
} else {
expectTxnTrace(v, traces.trace, want[0])
}
}
}
func expectSlowQuery(t Validator, slowQuery *slowQuery, want WantSlowQuery) {
if slowQuery.Count != want.Count {
t.Error("wrong Count field", slowQuery.Count, want.Count)
}
validateStringField(t, "MetricName", slowQuery.DatastoreMetric, want.MetricName)
validateStringField(t, "Query", slowQuery.ParameterizedQuery, want.Query)
validateStringField(t, "TxnName", slowQuery.TxnName, want.TxnName)
validateStringField(t, "TxnURL", slowQuery.TxnURL, want.TxnURL)
validateStringField(t, "DatabaseName", slowQuery.DatabaseName, want.DatabaseName)
validateStringField(t, "Host", slowQuery.Host, want.Host)
validateStringField(t, "PortPathOrID", slowQuery.PortPathOrID, want.PortPathOrID)
expectAttributes(t, map[string]interface{}(slowQuery.QueryParameters), want.Params)
}
// ExpectSlowQueries allows testing of slow queries.
func ExpectSlowQueries(t Validator, slowQueries *slowQueries, want []WantSlowQuery) {
if len(want) != len(slowQueries.priorityQueue) {
t.Error("wrong number of slow queries",
"expected", len(want), "got", len(slowQueries.priorityQueue))
return
}
for _, s := range want {
idx, ok := slowQueries.lookup[s.Query]
if !ok {
t.Error("unable to find slow query", s.Query)
continue
}
expectSlowQuery(t, slowQueries.priorityQueue[idx], s)
}
}

vendor/github.com/newrelic/go-agent/internal/harvest.go

@ -0,0 +1,153 @@
package internal
import (
"strings"
"sync"
"time"
)
// Harvestable is something that can be merged into a Harvest.
type Harvestable interface {
MergeIntoHarvest(h *Harvest)
}
// Harvest contains collected data.
type Harvest struct {
Metrics *metricTable
CustomEvents *customEvents
TxnEvents *txnEvents
ErrorEvents *errorEvents
ErrorTraces *harvestErrors
TxnTraces *harvestTraces
SlowSQLs *slowQueries
}
// Payloads returns a map from expected collector method name to data type.
func (h *Harvest) Payloads() map[string]PayloadCreator {
return map[string]PayloadCreator{
cmdMetrics: h.Metrics,
cmdCustomEvents: h.CustomEvents,
cmdTxnEvents: h.TxnEvents,
cmdErrorEvents: h.ErrorEvents,
cmdErrorData: h.ErrorTraces,
cmdTxnTraces: h.TxnTraces,
cmdSlowSQLs: h.SlowSQLs,
}
}
// NewHarvest returns a new Harvest.
func NewHarvest(now time.Time) *Harvest {
return &Harvest{
Metrics: newMetricTable(maxMetrics, now),
CustomEvents: newCustomEvents(maxCustomEvents),
TxnEvents: newTxnEvents(maxTxnEvents),
ErrorEvents: newErrorEvents(maxErrorEvents),
ErrorTraces: newHarvestErrors(maxHarvestErrors),
TxnTraces: newHarvestTraces(),
SlowSQLs: newSlowQueries(maxHarvestSlowSQLs),
}
}
var (
trackMutex sync.Mutex
trackMetrics []string
)
// TrackUsage helps track which integration packages are used.
func TrackUsage(s ...string) {
trackMutex.Lock()
defer trackMutex.Unlock()
m := "Supportability/" + strings.Join(s, "/")
trackMetrics = append(trackMetrics, m)
}
func createTrackUsageMetrics(metrics *metricTable) {
trackMutex.Lock()
defer trackMutex.Unlock()
for _, m := range trackMetrics {
metrics.addSingleCount(m, forced)
}
}
// CreateFinalMetrics creates extra metrics at harvest time.
func (h *Harvest) CreateFinalMetrics() {
h.Metrics.addSingleCount(instanceReporting, forced)
h.Metrics.addCount(customEventsSeen, h.CustomEvents.numSeen(), forced)
h.Metrics.addCount(customEventsSent, h.CustomEvents.numSaved(), forced)
h.Metrics.addCount(txnEventsSeen, h.TxnEvents.numSeen(), forced)
h.Metrics.addCount(txnEventsSent, h.TxnEvents.numSaved(), forced)
h.Metrics.addCount(errorEventsSeen, h.ErrorEvents.numSeen(), forced)
h.Metrics.addCount(errorEventsSent, h.ErrorEvents.numSaved(), forced)
if h.Metrics.numDropped > 0 {
h.Metrics.addCount(supportabilityDropped, float64(h.Metrics.numDropped), forced)
}
createTrackUsageMetrics(h.Metrics)
}
// PayloadCreator is a data type in the harvest.
type PayloadCreator interface {
// In the event of an rpm request failure (hopefully simply an
// intermittent collector issue) the payload may be merged into the next
// time period's harvest.
Harvestable
// Data prepares JSON in the format expected by the collector endpoint.
// This method should return (nil, nil) if the payload is empty and no
// rpm request is necessary.
Data(agentRunID string, harvestStart time.Time) ([]byte, error)
}
// CreateTxnMetricsArgs contains the parameters to CreateTxnMetrics.
type CreateTxnMetricsArgs struct {
IsWeb bool
Duration time.Duration
Exclusive time.Duration
Name string
Zone ApdexZone
ApdexThreshold time.Duration
HasErrors bool
Queueing time.Duration
}
// CreateTxnMetrics creates metrics for a transaction.
func CreateTxnMetrics(args CreateTxnMetricsArgs, metrics *metricTable) {
// Duration Metrics
rollup := backgroundRollup
if args.IsWeb {
rollup = webRollup
metrics.addDuration(dispatcherMetric, "", args.Duration, 0, forced)
}
metrics.addDuration(args.Name, "", args.Duration, args.Exclusive, forced)
metrics.addDuration(rollup, "", args.Duration, args.Exclusive, forced)
// Apdex Metrics
if args.Zone != ApdexNone {
metrics.addApdex(apdexRollup, "", args.ApdexThreshold, args.Zone, forced)
mname := apdexPrefix + removeFirstSegment(args.Name)
metrics.addApdex(mname, "", args.ApdexThreshold, args.Zone, unforced)
}
// Error Metrics
if args.HasErrors {
metrics.addSingleCount(errorsAll, forced)
if args.IsWeb {
metrics.addSingleCount(errorsWeb, forced)
} else {
metrics.addSingleCount(errorsBackground, forced)
}
metrics.addSingleCount(errorsPrefix+args.Name, forced)
}
// Queueing Metrics
if args.Queueing > 0 {
metrics.addDuration(queueMetric, "", args.Queueing, args.Queueing, forced)
}
}
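
An illustrative same-package sketch: the metrics created for a satisfying 250ms web transaction (ApdexSatisfying and newMetricTable live elsewhere in this package; the transaction name is hypothetical):

mt := newMetricTable(maxMetrics, time.Now())
CreateTxnMetrics(CreateTxnMetricsArgs{
	IsWeb:          true,
	Duration:       250 * time.Millisecond,
	Exclusive:      250 * time.Millisecond,
	Name:           "WebTransaction/Go/users",
	Zone:           ApdexSatisfying,
	ApdexThreshold: 500 * time.Millisecond,
}, mt)
// Adds "WebTransaction/Go/users", "WebTransaction", "HttpDispatcher",
// "Apdex", and "Apdex/Go/users" (removeFirstSegment strips the first
// segment of the transaction name).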


@ -0,0 +1,52 @@
package internal
import (
"bytes"
"github.com/newrelic/go-agent/internal/jsonx"
)
type jsonWriter interface {
WriteJSON(buf *bytes.Buffer)
}
type jsonFieldsWriter struct {
buf *bytes.Buffer
needsComma bool
}
func (w *jsonFieldsWriter) addKey(key string) {
if w.needsComma {
w.buf.WriteByte(',')
} else {
w.needsComma = true
}
// defensively assume that the key needs escaping:
jsonx.AppendString(w.buf, key)
w.buf.WriteByte(':')
}
func (w *jsonFieldsWriter) stringField(key string, val string) {
w.addKey(key)
jsonx.AppendString(w.buf, val)
}
func (w *jsonFieldsWriter) intField(key string, val int64) {
w.addKey(key)
jsonx.AppendInt(w.buf, val)
}
func (w *jsonFieldsWriter) floatField(key string, val float64) {
w.addKey(key)
jsonx.AppendFloat(w.buf, val)
}
func (w *jsonFieldsWriter) rawField(key string, val JSONString) {
w.addKey(key)
w.buf.WriteString(string(val))
}
func (w *jsonFieldsWriter) writerField(key string, val jsonWriter) {
w.addKey(key)
val.WriteJSON(w.buf)
}
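
A minimal same-package sketch: the caller owns the enclosing braces and addKey supplies the commas.

var buf bytes.Buffer
w := jsonFieldsWriter{buf: &buf}
buf.WriteByte('{')
w.stringField("name", "txn")
w.intField("count", 3)
buf.WriteByte('}')
// buf.String() == `{"name":"txn","count":3}`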


@ -0,0 +1,174 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonx extends the encoding/json package to encode JSON
// incrementally and without requiring reflection.
package jsonx
import (
"bytes"
"encoding/json"
"math"
"reflect"
"strconv"
"unicode/utf8"
)
var hex = "0123456789abcdef"
// AppendString escapes s and appends it to buf.
func AppendString(buf *bytes.Buffer, s string) {
buf.WriteByte('"')
start := 0
for i := 0; i < len(s); {
if b := s[i]; b < utf8.RuneSelf {
if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' {
i++
continue
}
if start < i {
buf.WriteString(s[start:i])
}
switch b {
case '\\', '"':
buf.WriteByte('\\')
buf.WriteByte(b)
case '\n':
buf.WriteByte('\\')
buf.WriteByte('n')
case '\r':
buf.WriteByte('\\')
buf.WriteByte('r')
case '\t':
buf.WriteByte('\\')
buf.WriteByte('t')
default:
// This encodes bytes < 0x20 except for \n and \r,
// as well as <, > and &. The latter are escaped because they
// can lead to security holes when user-controlled strings
// are rendered into JSON and served to some browsers.
buf.WriteString(`\u00`)
buf.WriteByte(hex[b>>4])
buf.WriteByte(hex[b&0xF])
}
i++
start = i
continue
}
c, size := utf8.DecodeRuneInString(s[i:])
if c == utf8.RuneError && size == 1 {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\ufffd`)
i += size
start = i
continue
}
// U+2028 is LINE SEPARATOR.
// U+2029 is PARAGRAPH SEPARATOR.
// They are both technically valid characters in JSON strings,
// but don't work in JSONP, which has to be evaluated as JavaScript,
// and can lead to security holes there. It is valid JSON to
// escape them, so we do so unconditionally.
// See http://timelessrepo.com/json-isnt-a-javascript-subset for discussion.
if c == '\u2028' || c == '\u2029' {
if start < i {
buf.WriteString(s[start:i])
}
buf.WriteString(`\u202`)
buf.WriteByte(hex[c&0xF])
i += size
start = i
continue
}
i += size
}
if start < len(s) {
buf.WriteString(s[start:])
}
buf.WriteByte('"')
}
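// Illustrative usage (expected output reconstructed from the escaping
// rules above):
//
//	var buf bytes.Buffer
//	AppendString(&buf, `<b>"hi"</b>`)
//	buf.String() == `"\u003cb\u003e\"hi\"\u003c/b\u003e"`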
// AppendStringArray appends an array of string literals to buf.
func AppendStringArray(buf *bytes.Buffer, a ...string) {
buf.WriteByte('[')
for i, s := range a {
if i > 0 {
buf.WriteByte(',')
}
AppendString(buf, s)
}
buf.WriteByte(']')
}
// AppendFloat appends a numeric literal representing the value to buf.
func AppendFloat(buf *bytes.Buffer, x float64) error {
var scratch [64]byte
if math.IsInf(x, 0) || math.IsNaN(x) {
return &json.UnsupportedValueError{
Value: reflect.ValueOf(x),
Str: strconv.FormatFloat(x, 'g', -1, 64),
}
}
buf.Write(strconv.AppendFloat(scratch[:0], x, 'g', -1, 64))
return nil
}
// AppendFloatArray appends an array of numeric literals to buf.
func AppendFloatArray(buf *bytes.Buffer, a ...float64) error {
buf.WriteByte('[')
for i, x := range a {
if i > 0 {
buf.WriteByte(',')
}
if err := AppendFloat(buf, x); err != nil {
return err
}
}
buf.WriteByte(']')
return nil
}
// AppendInt appends a numeric literal representing the value to buf.
func AppendInt(buf *bytes.Buffer, x int64) {
var scratch [64]byte
buf.Write(strconv.AppendInt(scratch[:0], x, 10))
}
// AppendIntArray appends an array of numeric literals to buf.
func AppendIntArray(buf *bytes.Buffer, a ...int64) {
var scratch [64]byte
buf.WriteByte('[')
for i, x := range a {
if i > 0 {
buf.WriteByte(',')
}
buf.Write(strconv.AppendInt(scratch[:0], x, 10))
}
buf.WriteByte(']')
}
// AppendUint appends a numeric literal representing the value to buf.
func AppendUint(buf *bytes.Buffer, x uint64) {
var scratch [64]byte
buf.Write(strconv.AppendUint(scratch[:0], x, 10))
}
// AppendUintArray appends an array of numeric literals to buf.
func AppendUintArray(buf *bytes.Buffer, a ...uint64) {
var scratch [64]byte
buf.WriteByte('[')
for i, x := range a {
if i > 0 {
buf.WriteByte(',')
}
buf.Write(strconv.AppendUint(scratch[:0], x, 10))
}
buf.WriteByte(']')
}

vendor/github.com/newrelic/go-agent/internal/labels.go

@ -0,0 +1,23 @@
package internal
import "encoding/json"
// Labels is used for connect JSON formatting.
type Labels map[string]string
// MarshalJSON formats Labels as the array of label_type/label_value pairs
// expected by the collector.
func (l Labels) MarshalJSON() ([]byte, error) {
ls := make([]struct {
Key string `json:"label_type"`
Value string `json:"label_value"`
}, len(l))
i := 0
for key, val := range l {
ls[i].Key = key
ls[i].Value = val
i++
}
return json.Marshal(ls)
}
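
For illustration (label values hypothetical; map iteration makes pair order nondeterministic):

js, _ := json.Marshal(Labels{"env": "prod"})
// [{"label_type":"env","label_value":"prod"}]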

vendor/github.com/newrelic/go-agent/internal/limits.go

@ -0,0 +1,53 @@
package internal
import "time"
const (
// app behavior
// ConnectBackoff is the wait time between unsuccessful connect
// attempts.
ConnectBackoff = 20 * time.Second
// HarvestPeriod is the period that collected data is sent to New Relic.
HarvestPeriod = 60 * time.Second
// CollectorTimeout is the timeout used in the client for communication
// with New Relic's servers.
CollectorTimeout = 20 * time.Second
// AppDataChanSize is the size of the channel that contains data sent to
// the app processor.
AppDataChanSize = 200
failedMetricAttemptsLimit = 5
failedEventsAttemptsLimit = 10
// transaction behavior
maxStackTraceFrames = 100
// MaxTxnErrors is the maximum number of errors captured per
// transaction.
MaxTxnErrors = 5
maxTxnTraceNodes = 256
maxTxnSlowQueries = 10
// harvest data
maxMetrics = 2 * 1000
maxCustomEvents = 10 * 1000
maxTxnEvents = 10 * 1000
maxErrorEvents = 100
maxHarvestErrors = 20
maxHarvestSlowSQLs = 10
// attributes
attributeKeyLengthLimit = 255
attributeValueLengthLimit = 255
attributeUserLimit = 64
attributeAgentLimit = 255 - attributeUserLimit
customEventAttributeLimit = 64
// Limits affecting Config validation are found in the config package.
// RuntimeSamplerPeriod is the period of the runtime sampler. Runtime
// metrics should not depend on the sampler period, but the period must
// be the same across instances. For that reason, this value should not
// be changed without notifying customers that they must update all
// instances simultaneously for valid runtime metrics.
RuntimeSamplerPeriod = 60 * time.Second
)


@ -0,0 +1,89 @@
package logger
import (
"encoding/json"
"fmt"
"io"
"log"
"os"
)
// Logger matches newrelic.Logger to allow implementations to be passed to
// internal packages.
type Logger interface {
Error(msg string, context map[string]interface{})
Warn(msg string, context map[string]interface{})
Info(msg string, context map[string]interface{})
Debug(msg string, context map[string]interface{})
DebugEnabled() bool
}
// ShimLogger implements Logger and does nothing.
type ShimLogger struct{}
// Error allows ShimLogger to implement Logger.
func (s ShimLogger) Error(string, map[string]interface{}) {}
// Warn allows ShimLogger to implement Logger.
func (s ShimLogger) Warn(string, map[string]interface{}) {}
// Info allows ShimLogger to implement Logger.
func (s ShimLogger) Info(string, map[string]interface{}) {}
// Debug allows ShimLogger to implement Logger.
func (s ShimLogger) Debug(string, map[string]interface{}) {}
// DebugEnabled allows ShimLogger to implement Logger.
func (s ShimLogger) DebugEnabled() bool { return false }
type logFile struct {
l *log.Logger
doDebug bool
}
// New creates a basic Logger.
func New(w io.Writer, doDebug bool) Logger {
return &logFile{
l: log.New(w, logPid, logFlags),
doDebug: doDebug,
}
}
const logFlags = log.Ldate | log.Ltime | log.Lmicroseconds
var (
logPid = fmt.Sprintf("(%d) ", os.Getpid())
)
func (f *logFile) fire(level, msg string, ctx map[string]interface{}) {
js, err := json.Marshal(struct {
Level string `json:"level"`
Event string `json:"msg"`
Context map[string]interface{} `json:"context"`
}{
level,
msg,
ctx,
})
if nil == err {
f.l.Print(string(js)) // Print, not Printf: the payload may contain '%'.
} else {
f.l.Printf("unable to marshal log entry: %v", err)
}
}
func (f *logFile) Error(msg string, ctx map[string]interface{}) {
f.fire("error", msg, ctx)
}
func (f *logFile) Warn(msg string, ctx map[string]interface{}) {
f.fire("warn", msg, ctx)
}
func (f *logFile) Info(msg string, ctx map[string]interface{}) {
f.fire("info", msg, ctx)
}
func (f *logFile) Debug(msg string, ctx map[string]interface{}) {
if f.doDebug {
f.fire("debug", msg, ctx)
}
}
func (f *logFile) DebugEnabled() bool { return f.doDebug }
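
A brief usage sketch; the sample output is reconstructed from fire above, with a hypothetical pid and timestamp:

lg := logger.New(os.Stderr, true) // debug enabled
lg.Info("connected", map[string]interface{}{"run": "abc"})
// (1234) 2016/12/15 19:17:52.000000 {"level":"info","msg":"connected","context":{"run":"abc"}}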


@ -0,0 +1,145 @@
package internal
const (
apdexRollup = "Apdex"
apdexPrefix = "Apdex/"
webRollup = "WebTransaction"
backgroundRollup = "OtherTransaction/all"
errorsAll = "Errors/all"
errorsWeb = "Errors/allWeb"
errorsBackground = "Errors/allOther"
errorsPrefix = "Errors/"
// "HttpDispatcher" metric is used for the overview graph, and
// therefore should only be made for web transactions.
dispatcherMetric = "HttpDispatcher"
queueMetric = "WebFrontend/QueueTime"
webMetricPrefix = "WebTransaction/Go"
backgroundMetricPrefix = "OtherTransaction/Go"
instanceReporting = "Instance/Reporting"
// https://newrelic.atlassian.net/wiki/display/eng/Custom+Events+in+New+Relic+Agents
customEventsSeen = "Supportability/Events/Customer/Seen"
customEventsSent = "Supportability/Events/Customer/Sent"
// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
txnEventsSeen = "Supportability/AnalyticsEvents/TotalEventsSeen"
txnEventsSent = "Supportability/AnalyticsEvents/TotalEventsSent"
// https://source.datanerd.us/agents/agent-specs/blob/master/Error-Events.md
errorEventsSeen = "Supportability/Events/TransactionError/Seen"
errorEventsSent = "Supportability/Events/TransactionError/Sent"
supportabilityDropped = "Supportability/MetricsDropped"
// source.datanerd.us/agents/agent-specs/blob/master/Datastore-Metrics-PORTED.md
datastoreAll = "Datastore/all"
datastoreWeb = "Datastore/allWeb"
datastoreOther = "Datastore/allOther"
// source.datanerd.us/agents/agent-specs/blob/master/APIs/external_segment.md
// source.datanerd.us/agents/agent-specs/blob/master/APIs/external_cat.md
// source.datanerd.us/agents/agent-specs/blob/master/Cross-Application-Tracing-PORTED.md
externalAll = "External/all"
externalWeb = "External/allWeb"
externalOther = "External/allOther"
// Runtime/System Metrics
memoryPhysical = "Memory/Physical"
heapObjectsAllocated = "Memory/Heap/AllocatedObjects"
cpuUserUtilization = "CPU/User/Utilization"
cpuSystemUtilization = "CPU/System/Utilization"
cpuUserTime = "CPU/User Time"
cpuSystemTime = "CPU/System Time"
runGoroutine = "Go/Runtime/Goroutines"
gcPauseFraction = "GC/System/Pause Fraction"
gcPauses = "GC/System/Pauses"
)
func customSegmentMetric(s string) string {
return "Custom/" + s
}
// DatastoreMetricKey contains the fields by which datastore metrics are
// aggregated.
type DatastoreMetricKey struct {
Product string
Collection string
Operation string
Host string
PortPathOrID string
}
type externalMetricKey struct {
Host string
ExternalCrossProcessID string
ExternalTransactionName string
}
type datastoreProductMetrics struct {
All string // Datastore/{datastore}/all
Web string // Datastore/{datastore}/allWeb
Other string // Datastore/{datastore}/allOther
}
func datastoreScopedMetric(key DatastoreMetricKey) string {
if "" != key.Collection {
return datastoreStatementMetric(key)
}
return datastoreOperationMetric(key)
}
func datastoreProductMetric(key DatastoreMetricKey) datastoreProductMetrics {
d, ok := datastoreProductMetricsCache[key.Product]
if ok {
return d
}
return datastoreProductMetrics{
All: "Datastore/" + key.Product + "/all",
Web: "Datastore/" + key.Product + "/allWeb",
Other: "Datastore/" + key.Product + "/allOther",
}
}
// Datastore/operation/{datastore}/{operation}
func datastoreOperationMetric(key DatastoreMetricKey) string {
return "Datastore/operation/" + key.Product +
"/" + key.Operation
}
// Datastore/statement/{datastore}/{table}/{operation}
func datastoreStatementMetric(key DatastoreMetricKey) string {
return "Datastore/statement/" + key.Product +
"/" + key.Collection +
"/" + key.Operation
}
// Datastore/instance/{datastore}/{host}/{port_path_or_id}
func datastoreInstanceMetric(key DatastoreMetricKey) string {
return "Datastore/instance/" + key.Product +
"/" + key.Host +
"/" + key.PortPathOrID
}
// External/{host}/all
func externalHostMetric(key externalMetricKey) string {
return "External/" + key.Host + "/all"
}
// ExternalApp/{host}/{external_id}/all
func externalAppMetric(key externalMetricKey) string {
return "ExternalApp/" + key.Host +
"/" + key.ExternalCrossProcessID + "/all"
}
// ExternalTransaction/{host}/{external_id}/{external_txnname}
func externalTransactionMetric(key externalMetricKey) string {
return "ExternalTransaction/" + key.Host +
"/" + key.ExternalCrossProcessID +
"/" + key.ExternalTransactionName
}
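
A same-package sketch of the builders above (key values hypothetical):

key := DatastoreMetricKey{Product: "MySQL", Collection: "users", Operation: "SELECT"}
datastoreScopedMetric(key) // Datastore/statement/MySQL/users/SELECT
key.Collection = ""
datastoreScopedMetric(key) // Datastore/operation/MySQL/SELECT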


@ -0,0 +1,96 @@
package internal
var (
datastoreProductMetricsCache = map[string]datastoreProductMetrics{
"Cassandra": {
All: "Datastore/Cassandra/all",
Web: "Datastore/Cassandra/allWeb",
Other: "Datastore/Cassandra/allOther",
},
"Derby": {
All: "Datastore/Derby/all",
Web: "Datastore/Derby/allWeb",
Other: "Datastore/Derby/allOther",
},
"Elasticsearch": {
All: "Datastore/Elasticsearch/all",
Web: "Datastore/Elasticsearch/allWeb",
Other: "Datastore/Elasticsearch/allOther",
},
"Firebird": {
All: "Datastore/Firebird/all",
Web: "Datastore/Firebird/allWeb",
Other: "Datastore/Firebird/allOther",
},
"IBMDB2": {
All: "Datastore/IBMDB2/all",
Web: "Datastore/IBMDB2/allWeb",
Other: "Datastore/IBMDB2/allOther",
},
"Informix": {
All: "Datastore/Informix/all",
Web: "Datastore/Informix/allWeb",
Other: "Datastore/Informix/allOther",
},
"Memcached": {
All: "Datastore/Memcached/all",
Web: "Datastore/Memcached/allWeb",
Other: "Datastore/Memcached/allOther",
},
"MongoDB": {
All: "Datastore/MongoDB/all",
Web: "Datastore/MongoDB/allWeb",
Other: "Datastore/MongoDB/allOther",
},
"MySQL": {
All: "Datastore/MySQL/all",
Web: "Datastore/MySQL/allWeb",
Other: "Datastore/MySQL/allOther",
},
"MSSQL": {
All: "Datastore/MSSQL/all",
Web: "Datastore/MSSQL/allWeb",
Other: "Datastore/MSSQL/allOther",
},
"Oracle": {
All: "Datastore/Oracle/all",
Web: "Datastore/Oracle/allWeb",
Other: "Datastore/Oracle/allOther",
},
"Postgres": {
All: "Datastore/Postgres/all",
Web: "Datastore/Postgres/allWeb",
Other: "Datastore/Postgres/allOther",
},
"Redis": {
All: "Datastore/Redis/all",
Web: "Datastore/Redis/allWeb",
Other: "Datastore/Redis/allOther",
},
"Solr": {
All: "Datastore/Solr/all",
Web: "Datastore/Solr/allWeb",
Other: "Datastore/Solr/allOther",
},
"SQLite": {
All: "Datastore/SQLite/all",
Web: "Datastore/SQLite/allWeb",
Other: "Datastore/SQLite/allOther",
},
"CouchDB": {
All: "Datastore/CouchDB/all",
Web: "Datastore/CouchDB/allWeb",
Other: "Datastore/CouchDB/allOther",
},
"Riak": {
All: "Datastore/Riak/all",
Web: "Datastore/Riak/allWeb",
Other: "Datastore/Riak/allOther",
},
"VoltDB": {
All: "Datastore/VoltDB/all",
Web: "Datastore/VoltDB/allWeb",
Other: "Datastore/VoltDB/allOther",
},
}
)


@ -0,0 +1,163 @@
package internal
import (
"encoding/json"
"regexp"
"sort"
"strings"
)
type ruleResult int
const (
ruleChanged ruleResult = iota
ruleUnchanged
ruleIgnore
)
type metricRule struct {
// 'Ignore' indicates if the entire transaction should be discarded if
// there is a match. This field is only used by "url_rules" and
// "transaction_name_rules", not "metric_name_rules".
Ignore bool `json:"ignore"`
EachSegment bool `json:"each_segment"`
ReplaceAll bool `json:"replace_all"`
Terminate bool `json:"terminate_chain"`
Order int `json:"eval_order"`
OriginalReplacement string `json:"replacement"`
RawExpr string `json:"match_expression"`
// Go's regexp backreferences use '${1}' instead of the Perlish '\1', so
// we transform the replacement string into the Go syntax and store it
// here.
TransformedReplacement string
re *regexp.Regexp
}
type metricRules []*metricRule
// Go's regexp backreferences use `${1}` instead of the Perlish `\1`, so we must
// transform the replacement string. This is non-trivial: `\1` is a
// backreference but `\\1` is not. Rather than count the number of back slashes
// preceding the digit, we simply skip rules with tricky replacements.
var (
transformReplacementAmbiguous = regexp.MustCompile(`\\\\([0-9]+)`)
transformReplacementRegex = regexp.MustCompile(`\\([0-9]+)`)
transformReplacementReplacement = "$${${1}}"
)
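// For illustration, the rewrite performed by the regexes above turns a
// Perlish replacement into Go's syntax (values hypothetical):
//
//	in := `/prefix/\1/suffix`
//	transformReplacementRegex.ReplaceAllString(in,
//		transformReplacementReplacement) // "/prefix/${1}/suffix"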
func (rules *metricRules) UnmarshalJSON(data []byte) (err error) {
var raw []*metricRule
if err := json.Unmarshal(data, &raw); nil != err {
return err
}
valid := make(metricRules, 0, len(raw))
for _, r := range raw {
re, err := regexp.Compile("(?i)" + r.RawExpr)
if err != nil {
// TODO
// Warn("unable to compile rule", {
// "match_expression": r.RawExpr,
// "error": err.Error(),
// })
continue
}
if transformReplacementAmbiguous.MatchString(r.OriginalReplacement) {
// TODO
// Warn("unable to transform replacement", {
// "match_expression": r.RawExpr,
// "replacement": r.OriginalReplacement,
// })
continue
}
r.re = re
r.TransformedReplacement = transformReplacementRegex.ReplaceAllString(r.OriginalReplacement,
transformReplacementReplacement)
valid = append(valid, r)
}
sort.Sort(valid)
*rules = valid
return nil
}
func (rules metricRules) Len() int {
return len(rules)
}
// Rules should be applied in increasing order
func (rules metricRules) Less(i, j int) bool {
return rules[i].Order < rules[j].Order
}
func (rules metricRules) Swap(i, j int) {
rules[i], rules[j] = rules[j], rules[i]
}
func replaceFirst(re *regexp.Regexp, s string, replacement string) string {
// Note that ReplaceAllStringFunc cannot be used here since it does
// not replace $1 placeholders.
loc := re.FindStringIndex(s)
if nil == loc {
return s
}
firstMatch := s[loc[0]:loc[1]]
firstMatchReplaced := re.ReplaceAllString(firstMatch, replacement)
return s[0:loc[0]] + firstMatchReplaced + s[loc[1]:]
}
func (r *metricRule) apply(s string) (ruleResult, string) {
// Rules are strange, and there is no spec.
// This code attempts to duplicate the logic of the PHP agent.
// Ambiguity abounds.
if r.Ignore {
if r.re.MatchString(s) {
return ruleIgnore, ""
}
return ruleUnchanged, s
}
var out string
if r.ReplaceAll {
out = r.re.ReplaceAllString(s, r.TransformedReplacement)
} else if r.EachSegment {
segments := strings.Split(s, "/")
applied := make([]string, len(segments))
for i, segment := range segments {
applied[i] = replaceFirst(r.re, segment, r.TransformedReplacement)
}
out = strings.Join(applied, "/")
} else {
out = replaceFirst(r.re, s, r.TransformedReplacement)
}
if out == s {
return ruleUnchanged, out
}
return ruleChanged, out
}
func (rules metricRules) Apply(input string) string {
var res ruleResult
s := input
for _, rule := range rules {
res, s = rule.apply(s)
if ruleIgnore == res {
return ""
}
if (ruleChanged == res) && rule.Terminate {
break
}
}
return s
}

vendor/github.com/newrelic/go-agent/internal/metrics.go

@ -0,0 +1,258 @@
package internal
import (
"bytes"
"time"
"github.com/newrelic/go-agent/internal/jsonx"
)
type metricForce int
const (
forced metricForce = iota
unforced
)
type metricID struct {
Name string `json:"name"`
Scope string `json:"scope,omitempty"`
}
type metricData struct {
// These values are in the units expected by the collector.
countSatisfied float64 // Seconds, or count for Apdex
totalTolerated float64 // Seconds, or count for Apdex
exclusiveFailed float64 // Seconds, or count for Apdex
min float64 // Seconds
max float64 // Seconds
sumSquares float64 // Seconds**2, or 0 for Apdex
}
func metricDataFromDuration(duration, exclusive time.Duration) metricData {
ds := duration.Seconds()
return metricData{
countSatisfied: 1,
totalTolerated: ds,
exclusiveFailed: exclusive.Seconds(),
min: ds,
max: ds,
sumSquares: ds * ds,
}
}
type metric struct {
forced metricForce
data metricData
}
type metricTable struct {
metricPeriodStart time.Time
failedHarvests int
maxTableSize int // After this max is reached, only forced metrics are added
numDropped int // Number of unforced metrics dropped due to full table
metrics map[metricID]*metric
}
func newMetricTable(maxTableSize int, now time.Time) *metricTable {
return &metricTable{
metricPeriodStart: now,
metrics: make(map[metricID]*metric),
maxTableSize: maxTableSize,
failedHarvests: 0,
}
}
func (mt *metricTable) full() bool {
return len(mt.metrics) >= mt.maxTableSize
}
func (data *metricData) aggregate(src metricData) {
data.countSatisfied += src.countSatisfied
data.totalTolerated += src.totalTolerated
data.exclusiveFailed += src.exclusiveFailed
if src.min < data.min {
data.min = src.min
}
if src.max > data.max {
data.max = src.max
}
data.sumSquares += src.sumSquares
}
func (mt *metricTable) mergeMetric(id metricID, m metric) {
if to := mt.metrics[id]; nil != to {
to.data.aggregate(m.data)
return
}
if mt.full() && (unforced == m.forced) {
mt.numDropped++
return
}
// NOTE: `new` is used in place of `&m` since the latter will make `m`
// get heap allocated regardless of whether or not this line gets
// reached (running go version go1.5 darwin/amd64). See
// BenchmarkAddingSameMetrics.
alloc := new(metric)
*alloc = m
mt.metrics[id] = alloc
}
func (mt *metricTable) mergeFailed(from *metricTable) {
fails := from.failedHarvests + 1
if fails >= failedMetricAttemptsLimit {
return
}
if from.metricPeriodStart.Before(mt.metricPeriodStart) {
mt.metricPeriodStart = from.metricPeriodStart
}
mt.failedHarvests = fails
mt.merge(from, "")
}
func (mt *metricTable) merge(from *metricTable, newScope string) {
if "" == newScope {
for id, m := range from.metrics {
mt.mergeMetric(id, *m)
}
} else {
for id, m := range from.metrics {
mt.mergeMetric(metricID{Name: id.Name, Scope: newScope}, *m)
}
}
}
func (mt *metricTable) add(name, scope string, data metricData, force metricForce) {
mt.mergeMetric(metricID{Name: name, Scope: scope}, metric{data: data, forced: force})
}
func (mt *metricTable) addCount(name string, count float64, force metricForce) {
mt.add(name, "", metricData{countSatisfied: count}, force)
}
func (mt *metricTable) addSingleCount(name string, force metricForce) {
mt.addCount(name, float64(1), force)
}
func (mt *metricTable) addDuration(name, scope string, duration, exclusive time.Duration, force metricForce) {
mt.add(name, scope, metricDataFromDuration(duration, exclusive), force)
}
func (mt *metricTable) addValueExclusive(name, scope string, total, exclusive float64, force metricForce) {
data := metricData{
countSatisfied: 1,
totalTolerated: total,
exclusiveFailed: exclusive,
min: total,
max: total,
sumSquares: total * total,
}
mt.add(name, scope, data, force)
}
func (mt *metricTable) addValue(name, scope string, total float64, force metricForce) {
mt.addValueExclusive(name, scope, total, total, force)
}
func (mt *metricTable) addApdex(name, scope string, apdexThreshold time.Duration, zone ApdexZone, force metricForce) {
apdexSeconds := apdexThreshold.Seconds()
data := metricData{min: apdexSeconds, max: apdexSeconds}
switch zone {
case ApdexSatisfying:
data.countSatisfied = 1
case ApdexTolerating:
data.totalTolerated = 1
case ApdexFailing:
data.exclusiveFailed = 1
}
mt.add(name, scope, data, force)
}
func (mt *metricTable) CollectorJSON(agentRunID string, now time.Time) ([]byte, error) {
if 0 == len(mt.metrics) {
return nil, nil
}
estimatedBytesPerMetric := 128
estimatedLen := len(mt.metrics) * estimatedBytesPerMetric
buf := bytes.NewBuffer(make([]byte, 0, estimatedLen))
buf.WriteByte('[')
jsonx.AppendString(buf, agentRunID)
buf.WriteByte(',')
jsonx.AppendInt(buf, mt.metricPeriodStart.Unix())
buf.WriteByte(',')
jsonx.AppendInt(buf, now.Unix())
buf.WriteByte(',')
buf.WriteByte('[')
first := true
for id, metric := range mt.metrics {
if first {
first = false
} else {
buf.WriteByte(',')
}
buf.WriteByte('[')
buf.WriteByte('{')
buf.WriteString(`"name":`)
jsonx.AppendString(buf, id.Name)
if id.Scope != "" {
buf.WriteString(`,"scope":`)
jsonx.AppendString(buf, id.Scope)
}
buf.WriteByte('}')
buf.WriteByte(',')
jsonx.AppendFloatArray(buf,
metric.data.countSatisfied,
metric.data.totalTolerated,
metric.data.exclusiveFailed,
metric.data.min,
metric.data.max,
metric.data.sumSquares)
buf.WriteByte(']')
}
buf.WriteByte(']')
buf.WriteByte(']')
return buf.Bytes(), nil
}
func (mt *metricTable) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
return mt.CollectorJSON(agentRunID, harvestStart)
}
func (mt *metricTable) MergeIntoHarvest(h *Harvest) {
h.Metrics.mergeFailed(mt)
}
func (mt *metricTable) ApplyRules(rules metricRules) *metricTable {
if nil == rules {
return mt
}
if len(rules) == 0 {
return mt
}
applied := newMetricTable(mt.maxTableSize, mt.metricPeriodStart)
cache := make(map[string]string)
for id, m := range mt.metrics {
out, ok := cache[id.Name]
if !ok {
out = rules.Apply(id.Name)
cache[id.Name] = out
}
if "" != out {
applied.mergeMetric(metricID{Name: out, Scope: id.Scope}, *m)
}
}
return applied
}


@ -0,0 +1,72 @@
package internal
import (
"net/http"
"strconv"
"strings"
"time"
)
const (
xRequestStart = "X-Request-Start"
xQueueStart = "X-Queue-Start"
)
var (
earliestAcceptableSeconds = time.Date(2000, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
latestAcceptableSeconds = time.Date(2050, time.January, 1, 0, 0, 0, 0, time.UTC).Unix()
)
func checkQueueTimeSeconds(secondsFloat float64) time.Time {
seconds := int64(secondsFloat)
nanos := int64((secondsFloat - float64(seconds)) * (1000.0 * 1000.0 * 1000.0))
if seconds > earliestAcceptableSeconds && seconds < latestAcceptableSeconds {
return time.Unix(seconds, nanos)
}
return time.Time{}
}
func parseQueueTime(s string) time.Time {
f, err := strconv.ParseFloat(s, 64)
if nil != err {
return time.Time{}
}
if f <= 0 {
return time.Time{}
}
// try microseconds
if t := checkQueueTimeSeconds(f / (1000.0 * 1000.0)); !t.IsZero() {
return t
}
// try milliseconds
if t := checkQueueTimeSeconds(f / (1000.0)); !t.IsZero() {
return t
}
// try seconds
if t := checkQueueTimeSeconds(f); !t.IsZero() {
return t
}
return time.Time{}
}
// QueueDuration returns the time the request spent queued before the
// transaction started, based on the X-Queue-Start or X-Request-Start
// header; it returns 0 if no usable header is present.
func QueueDuration(hdr http.Header, txnStart time.Time) time.Duration {
s := hdr.Get(xQueueStart)
if "" == s {
s = hdr.Get(xRequestStart)
}
if "" == s {
return 0
}
s = strings.TrimPrefix(s, "t=")
qt := parseQueueTime(s)
if qt.IsZero() {
return 0
}
if qt.After(txnStart) {
return 0
}
return txnStart.Sub(qt)
}
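
A usage sketch (header value hypothetical; frontends typically send "t=" plus a unix timestamp, and the unit probing above accepts seconds, milliseconds, or microseconds):

start := time.Now().Add(-30 * time.Millisecond)
hdr := http.Header{}
hdr.Set("X-Queue-Start", fmt.Sprintf("t=%d", start.UnixNano()/1000)) // microseconds
QueueDuration(hdr, time.Now()) // ~30ms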

vendor/github.com/newrelic/go-agent/internal/sampler.go

@ -0,0 +1,145 @@
package internal
import (
"runtime"
"time"
"github.com/newrelic/go-agent/internal/logger"
"github.com/newrelic/go-agent/internal/sysinfo"
)
// Sample is a system/runtime snapshot.
type Sample struct {
when time.Time
memStats runtime.MemStats
usage sysinfo.Usage
numGoroutine int
numCPU int
}
func bytesToMebibytesFloat(bts uint64) float64 {
return float64(bts) / (1024 * 1024)
}
// GetSample gathers a new Sample.
func GetSample(now time.Time, lg logger.Logger) *Sample {
s := Sample{
when: now,
numGoroutine: runtime.NumGoroutine(),
numCPU: runtime.NumCPU(),
}
if usage, err := sysinfo.GetUsage(); err == nil {
s.usage = usage
} else {
lg.Warn("unable to usage", map[string]interface{}{
"error": err.Error(),
})
}
runtime.ReadMemStats(&s.memStats)
return &s
}
type cpuStats struct {
used time.Duration
fraction float64 // used / (elapsed * numCPU)
}
// Stats contains system information for a period of time.
type Stats struct {
numGoroutine int
allocBytes uint64
heapObjects uint64
user cpuStats
system cpuStats
gcPauseFraction float64
deltaNumGC uint32
deltaPauseTotal time.Duration
minPause time.Duration
maxPause time.Duration
}
// Samples is used as the parameter to GetStats to avoid mixing up the previous
// and current sample.
type Samples struct {
Previous *Sample
Current *Sample
}
// GetStats combines two Samples into a Stats.
func GetStats(ss Samples) Stats {
cur := ss.Current
prev := ss.Previous
elapsed := cur.when.Sub(prev.when)
s := Stats{
numGoroutine: cur.numGoroutine,
allocBytes: cur.memStats.Alloc,
heapObjects: cur.memStats.HeapObjects,
}
// CPU Utilization
totalCPUSeconds := elapsed.Seconds() * float64(cur.numCPU)
if prev.usage.User != 0 && cur.usage.User > prev.usage.User {
s.user.used = cur.usage.User - prev.usage.User
s.user.fraction = s.user.used.Seconds() / totalCPUSeconds
}
if prev.usage.System != 0 && cur.usage.System > prev.usage.System {
s.system.used = cur.usage.System - prev.usage.System
s.system.fraction = s.system.used.Seconds() / totalCPUSeconds
}
// GC Pause Fraction
deltaPauseTotalNs := cur.memStats.PauseTotalNs - prev.memStats.PauseTotalNs
frac := float64(deltaPauseTotalNs) / float64(elapsed.Nanoseconds())
s.gcPauseFraction = frac
// GC Pauses
if deltaNumGC := cur.memStats.NumGC - prev.memStats.NumGC; deltaNumGC > 0 {
// In case more than 256 pauses have happened between samples
// and we are examining a subset of the pauses, we ensure that
// the min and max are not on the same side of the average by
// using the average as the starting min and max.
maxPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
minPauseNs := deltaPauseTotalNs / uint64(deltaNumGC)
for i := prev.memStats.NumGC + 1; i <= cur.memStats.NumGC; i++ {
pause := cur.memStats.PauseNs[(i+255)%256]
if pause > maxPauseNs {
maxPauseNs = pause
}
if pause < minPauseNs {
minPauseNs = pause
}
}
s.deltaPauseTotal = time.Duration(deltaPauseTotalNs) * time.Nanosecond
s.deltaNumGC = deltaNumGC
s.minPause = time.Duration(minPauseNs) * time.Nanosecond
s.maxPause = time.Duration(maxPauseNs) * time.Nanosecond
}
return s
}
// MergeIntoHarvest implements Harvestable.
func (s Stats) MergeIntoHarvest(h *Harvest) {
h.Metrics.addValue(heapObjectsAllocated, "", float64(s.heapObjects), forced)
h.Metrics.addValue(runGoroutine, "", float64(s.numGoroutine), forced)
h.Metrics.addValueExclusive(memoryPhysical, "", bytesToMebibytesFloat(s.allocBytes), 0, forced)
h.Metrics.addValueExclusive(cpuUserUtilization, "", s.user.fraction, 0, forced)
h.Metrics.addValueExclusive(cpuSystemUtilization, "", s.system.fraction, 0, forced)
h.Metrics.addValue(cpuUserTime, "", s.user.used.Seconds(), forced)
h.Metrics.addValue(cpuSystemTime, "", s.system.used.Seconds(), forced)
h.Metrics.addValueExclusive(gcPauseFraction, "", s.gcPauseFraction, 0, forced)
if s.deltaNumGC > 0 {
h.Metrics.add(gcPauses, "", metricData{
countSatisfied: float64(s.deltaNumGC),
totalTolerated: s.deltaPauseTotal.Seconds(),
exclusiveFailed: 0,
min: s.minPause.Seconds(),
max: s.maxPause.Seconds(),
sumSquares: s.deltaPauseTotal.Seconds() * s.deltaPauseTotal.Seconds(),
}, forced)
}
}
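
A same-package sketch tying the sampler to the harvest (NewHarvest comes from harvest.go; ShimLogger from the logger package):

prev := GetSample(time.Now(), logger.ShimLogger{})
// ... one RuntimeSamplerPeriod later ...
cur := GetSample(time.Now(), logger.ShimLogger{})
h := NewHarvest(time.Now())
GetStats(Samples{Previous: prev, Current: cur}).MergeIntoHarvest(h)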


@ -0,0 +1,145 @@
package internal
// https://newrelic.atlassian.net/wiki/display/eng/Language+agent+transaction+segment+terms+rules
import (
"encoding/json"
"strings"
)
const (
placeholder = "*"
separator = "/"
)
type segmentRule struct {
Prefix string `json:"prefix"`
Terms []string `json:"terms"`
TermsMap map[string]struct{}
}
// segmentRules is keyed by each segmentRule's Prefix field with any trailing
// slash removed.
type segmentRules map[string]*segmentRule
func buildTermsMap(terms []string) map[string]struct{} {
m := make(map[string]struct{}, len(terms))
for _, t := range terms {
m[t] = struct{}{}
}
return m
}
func (rules *segmentRules) UnmarshalJSON(b []byte) error {
var raw []*segmentRule
if err := json.Unmarshal(b, &raw); nil != err {
return err
}
rs := make(map[string]*segmentRule)
for _, rule := range raw {
prefix := strings.TrimSuffix(rule.Prefix, "/")
if len(strings.Split(prefix, "/")) != 2 {
// TODO
// Warn("invalid segment term rule prefix",
// {"prefix": rule.Prefix})
continue
}
if nil == rule.Terms {
// TODO
// Warn("segment term rule has missing terms",
// {"prefix": rule.Prefix})
continue
}
rule.TermsMap = buildTermsMap(rule.Terms)
rs[prefix] = rule
}
*rules = rs
return nil
}
func (rule *segmentRule) apply(name string) string {
if !strings.HasPrefix(name, rule.Prefix) {
return name
}
s := strings.TrimPrefix(name, rule.Prefix)
leadingSlash := ""
if strings.HasPrefix(s, separator) {
leadingSlash = separator
s = strings.TrimPrefix(s, separator)
}
if "" != s {
segments := strings.Split(s, separator)
for i, segment := range segments {
_, whitelisted := rule.TermsMap[segment]
if whitelisted {
segments[i] = segment
} else {
segments[i] = placeholder
}
}
segments = collapsePlaceholders(segments)
s = strings.Join(segments, separator)
}
return rule.Prefix + leadingSlash + s
}
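// Example (illustrative): a rule with Prefix "WebTransaction/Uri" and the
// single whitelisted term "two" keeps matching segments and collapses runs
// of non-matching ones into a single placeholder:
//
//	rule := segmentRule{Prefix: "WebTransaction/Uri", Terms: []string{"two"}}
//	rule.TermsMap = buildTermsMap(rule.Terms)
//	rule.apply("WebTransaction/Uri/one/two/three/four")
//	// yields "WebTransaction/Uri/*/two/*"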
func (rules segmentRules) apply(name string) string {
if nil == rules {
return name
}
rule, ok := rules[firstTwoSegments(name)]
if !ok {
return name
}
return rule.apply(name)
}
func firstTwoSegments(name string) string {
firstSlashIdx := strings.Index(name, separator)
if firstSlashIdx == -1 {
return name
}
secondSlashIdx := strings.Index(name[firstSlashIdx+1:], separator)
if secondSlashIdx == -1 {
return name
}
return name[0 : firstSlashIdx+secondSlashIdx+1]
}
func collapsePlaceholders(segments []string) []string {
j := 0
prevStar := false
for i := 0; i < len(segments); i++ {
segment := segments[i]
if placeholder == segment {
if prevStar {
continue
}
segments[j] = placeholder
j++
prevStar = true
} else {
segments[j] = segment
j++
prevStar = false
}
}
return segments[0:j]
}

View File

@@ -0,0 +1,254 @@
package internal
import (
"bytes"
"container/heap"
"hash/fnv"
"time"
"github.com/newrelic/go-agent/internal/jsonx"
)
type queryParameters map[string]interface{}
func vetQueryParameters(params map[string]interface{}) queryParameters {
if nil == params {
return nil
}
// Copying the parameters into a new map is safer than modifying the map
// from the customer.
vetted := make(map[string]interface{})
for key, val := range params {
if err := validAttributeKey(key); nil != err {
continue
}
val = truncateStringValueIfLongInterface(val)
if err := valueIsValid(val); nil != err {
continue
}
vetted[key] = val
}
return queryParameters(vetted)
}
func (q queryParameters) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('{')
w := jsonFieldsWriter{buf: buf}
for key, val := range q {
writeAttributeValueJSON(&w, key, val)
}
buf.WriteByte('}')
}
// https://source.datanerd.us/agents/agent-specs/blob/master/Slow-SQLs-LEGACY.md
// slowQueryInstance represents a single datastore call.
type slowQueryInstance struct {
// Fields populated right after the datastore segment finishes:
Duration time.Duration
DatastoreMetric string
ParameterizedQuery string
QueryParameters queryParameters
Host string
PortPathOrID string
DatabaseName string
StackTrace *StackTrace
// Fields populated when merging into the harvest:
TxnName string
TxnURL string
}
// Aggregation is performed to avoid reporting multiple slow queries with same
// query string. Since some datastore segments may be below the slow query
// threshold, the aggregation fields Count, Total, and Min should be taken with
// a grain of salt.
type slowQuery struct {
Count int32 // number of times the query has been observed
Total time.Duration // cumulative duration
Min time.Duration // minimum observed duration
// When Count > 1, slowQueryInstance contains values from the slowest
// observation.
slowQueryInstance
}
type slowQueries struct {
priorityQueue []*slowQuery
// lookup maps query strings to indices in the priorityQueue
lookup map[string]int
}
func (slows *slowQueries) Len() int {
return len(slows.priorityQueue)
}
func (slows *slowQueries) Less(i, j int) bool {
pq := slows.priorityQueue
return pq[i].Duration < pq[j].Duration
}
func (slows *slowQueries) Swap(i, j int) {
pq := slows.priorityQueue
si := pq[i]
sj := pq[j]
pq[i], pq[j] = pq[j], pq[i]
slows.lookup[si.ParameterizedQuery] = j
slows.lookup[sj.ParameterizedQuery] = i
}
// Push and Pop are unused: only heap.Init and heap.Fix are used.
func (slows *slowQueries) Push(x interface{}) {}
func (slows *slowQueries) Pop() interface{} { return nil }
func newSlowQueries(max int) *slowQueries {
return &slowQueries{
lookup: make(map[string]int, max),
priorityQueue: make([]*slowQuery, 0, max),
}
}
// Merge is used to merge slow queries from the transaction into the harvest.
func (slows *slowQueries) Merge(other *slowQueries, txnName, txnURL string) {
for _, s := range other.priorityQueue {
cp := *s
cp.TxnName = txnName
cp.TxnURL = txnURL
slows.observe(cp)
}
}
// merge aggregates the observations from two slow queries with the same Query.
func (slow *slowQuery) merge(other slowQuery) {
slow.Count += other.Count
slow.Total += other.Total
if other.Min < slow.Min {
slow.Min = other.Min
}
if other.Duration > slow.Duration {
slow.slowQueryInstance = other.slowQueryInstance
}
}
func (slows *slowQueries) observeInstance(slow slowQueryInstance) {
slows.observe(slowQuery{
Count: 1,
Total: slow.Duration,
Min: slow.Duration,
slowQueryInstance: slow,
})
}
func (slows *slowQueries) insertAtIndex(slow slowQuery, idx int) {
cpy := new(slowQuery)
*cpy = slow
slows.priorityQueue[idx] = cpy
slows.lookup[slow.ParameterizedQuery] = idx
heap.Fix(slows, idx)
}
func (slows *slowQueries) observe(slow slowQuery) {
// Has the query been observed previously?
if idx, ok := slows.lookup[slow.ParameterizedQuery]; ok {
slows.priorityQueue[idx].merge(slow)
heap.Fix(slows, idx)
return
}
// Has the collection reached max capacity?
if len(slows.priorityQueue) < cap(slows.priorityQueue) {
idx := len(slows.priorityQueue)
slows.priorityQueue = slows.priorityQueue[0 : idx+1]
slows.insertAtIndex(slow, idx)
return
}
// Is this query slower than the existing fastest?
fastest := slows.priorityQueue[0]
if slow.Duration > fastest.Duration {
delete(slows.lookup, fastest.ParameterizedQuery)
slows.insertAtIndex(slow, 0)
return
}
}
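// Example (illustrative): with a capacity of two, the fastest query sits
// at the root of the min-heap and is the first to be evicted:
//
//	sq := newSlowQueries(2)
//	sq.observeInstance(slowQueryInstance{ParameterizedQuery: "A", Duration: 10 * time.Millisecond})
//	sq.observeInstance(slowQueryInstance{ParameterizedQuery: "B", Duration: 20 * time.Millisecond})
//	sq.observeInstance(slowQueryInstance{ParameterizedQuery: "C", Duration: 30 * time.Millisecond})
//	// "A" has been evicted; "B" and "C" remain. Observing "B" again
//	// would merge into its existing entry rather than adding a new one.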
// The third element of the slow query JSON should be a hash of the query
// string. This hash may be used by backend services to aggregate queries which
// have the same query string. It is unknown whether this is actually used.
func makeSlowQueryID(query string) uint32 {
h := fnv.New32a()
h.Write([]byte(query))
return h.Sum32()
}
func (slow *slowQuery) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('[')
jsonx.AppendString(buf, slow.TxnName)
buf.WriteByte(',')
jsonx.AppendString(buf, slow.TxnURL)
buf.WriteByte(',')
jsonx.AppendInt(buf, int64(makeSlowQueryID(slow.ParameterizedQuery)))
buf.WriteByte(',')
jsonx.AppendString(buf, slow.ParameterizedQuery)
buf.WriteByte(',')
jsonx.AppendString(buf, slow.DatastoreMetric)
buf.WriteByte(',')
jsonx.AppendInt(buf, int64(slow.Count))
buf.WriteByte(',')
jsonx.AppendFloat(buf, slow.Total.Seconds()*1000.0)
buf.WriteByte(',')
jsonx.AppendFloat(buf, slow.Min.Seconds()*1000.0)
buf.WriteByte(',')
jsonx.AppendFloat(buf, slow.Duration.Seconds()*1000.0)
buf.WriteByte(',')
w := jsonFieldsWriter{buf: buf}
buf.WriteByte('{')
if "" != slow.Host {
w.stringField("host", slow.Host)
}
if "" != slow.PortPathOrID {
w.stringField("port_path_or_id", slow.PortPathOrID)
}
if "" != slow.DatabaseName {
w.stringField("database_name", slow.DatabaseName)
}
if nil != slow.StackTrace {
w.writerField("backtrace", slow.StackTrace)
}
if nil != slow.QueryParameters {
w.writerField("query_parameters", slow.QueryParameters)
}
buf.WriteByte('}')
buf.WriteByte(']')
}
// WriteJSON marshals the collection of slow queries into JSON according to the
// schema expected by the collector.
//
// Note: This JSON does not contain the agentRunID. This is for unknown
// historical reasons. Since the agentRunID is included in the URL,
// its use in the other commands' JSON is redundant (although required).
func (slows *slowQueries) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('[')
buf.WriteByte('[')
for idx, s := range slows.priorityQueue {
if idx > 0 {
buf.WriteByte(',')
}
s.WriteJSON(buf)
}
buf.WriteByte(']')
buf.WriteByte(']')
}
func (slows *slowQueries) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
if 0 == len(slows.priorityQueue) {
return nil, nil
}
estimate := 1024 * len(slows.priorityQueue)
buf := bytes.NewBuffer(make([]byte, 0, estimate))
slows.WriteJSON(buf)
return buf.Bytes(), nil
}
func (slows *slowQueries) MergeIntoHarvest(newHarvest *Harvest) {
}

View File

@@ -0,0 +1,82 @@
package internal
import (
"bytes"
"path"
"runtime"
)
// StackTrace is a stack trace.
type StackTrace struct {
callers []uintptr
written int
}
// GetStackTrace returns a new StackTrace.
func GetStackTrace(skipFrames int) *StackTrace {
st := &StackTrace{}
skip := 2 // skips runtime.Callers and this function
skip += skipFrames
st.callers = make([]uintptr, maxStackTraceFrames)
st.written = runtime.Callers(skip, st.callers)
st.callers = st.callers[0:st.written]
return st
}
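// Example (illustrative): capturing the current call stack and
// serializing it in the collector format:
//
//	st := GetStackTrace(0)
//	js, err := st.MarshalJSON()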
func pcToFunc(pc uintptr) (*runtime.Func, uintptr) {
// The Golang runtime package documentation says "To look up the file
// and line number of the call itself, use pc[i]-1. As an exception to
// this rule, if pc[i-1] corresponds to the function runtime.sigpanic,
// then pc[i] is the program counter of a faulting instruction and
// should be used without any subtraction."
//
// TODO: Fully understand when this subtraction is necessary.
place := pc - 1
return runtime.FuncForPC(place), place
}
func topCallerNameBase(st *StackTrace) string {
f, _ := pcToFunc(st.callers[0])
if nil == f {
return ""
}
return path.Base(f.Name())
}
// WriteJSON adds the stack trace to the buffer in the JSON form expected by the
// collector.
func (st *StackTrace) WriteJSON(buf *bytes.Buffer) {
buf.WriteByte('[')
for i, pc := range st.callers {
if i > 0 {
buf.WriteByte(',')
}
// Implements the format documented here:
// https://source.datanerd.us/agents/agent-specs/blob/master/Stack-Traces.md
buf.WriteByte('{')
if f, place := pcToFunc(pc); nil != f {
name := path.Base(f.Name())
file, line := f.FileLine(place)
w := jsonFieldsWriter{buf: buf}
w.stringField("filepath", file)
w.stringField("name", name)
w.intField("line", int64(line))
}
buf.WriteByte('}')
}
buf.WriteByte(']')
}
// MarshalJSON prepares JSON in the format expected by the collector.
func (st *StackTrace) MarshalJSON() ([]byte, error) {
estimate := 256 * len(st.callers)
buf := bytes.NewBuffer(make([]byte, 0, estimate))
st.WriteJSON(buf)
return buf.Bytes(), nil
}

View File

@@ -0,0 +1,124 @@
package sysinfo
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"os"
"regexp"
"runtime"
)
var (
// ErrDockerUnsupported is returned if Docker is not supported on the
// platform.
ErrDockerUnsupported = errors.New("Docker unsupported on this platform")
// ErrDockerNotFound is returned if a Docker ID is not found in
// /proc/self/cgroup
ErrDockerNotFound = errors.New("Docker ID not found")
)
// DockerID attempts to detect Docker.
func DockerID() (string, error) {
if "linux" != runtime.GOOS {
return "", ErrDockerUnsupported
}
f, err := os.Open("/proc/self/cgroup")
if err != nil {
return "", err
}
defer f.Close()
return parseDockerID(f)
}
var (
dockerIDLength = 64
dockerIDRegexRaw = fmt.Sprintf("^[0-9a-f]{%d}$", dockerIDLength)
dockerIDRegex = regexp.MustCompile(dockerIDRegexRaw)
)
func parseDockerID(r io.Reader) (string, error) {
// Each line in the cgroup file consists of three colon delimited fields.
// 1. hierarchy ID - we don't care about this
// 2. subsystems - comma separated list of cgroup subsystem names
// 3. control group - control group to which the process belongs
//
// Example
// 5:cpuacct,cpu,cpuset:/daemons
for scanner := bufio.NewScanner(r); scanner.Scan(); {
line := scanner.Bytes()
cols := bytes.SplitN(line, []byte(":"), 3)
if len(cols) < 3 {
continue
}
// We're only interested in the cpu subsystem.
if !isCPUCol(cols[1]) {
continue
}
// We're only interested in Docker generated cgroups.
// Reference Implementation:
// case cpu_cgroup
// # docker native driver w/out systemd (fs)
// when %r{^/docker/([0-9a-f]+)$} then $1
// # docker native driver with systemd
// when %r{^/system\.slice/docker-([0-9a-f]+)\.scope$} then $1
// # docker lxc driver
// when %r{^/lxc/([0-9a-f]+)$} then $1
//
var id string
if bytes.HasPrefix(cols[2], []byte("/docker/")) {
id = string(cols[2][len("/docker/"):])
} else if bytes.HasPrefix(cols[2], []byte("/lxc/")) {
id = string(cols[2][len("/lxc/"):])
} else if bytes.HasPrefix(cols[2], []byte("/system.slice/docker-")) &&
bytes.HasSuffix(cols[2], []byte(".scope")) {
id = string(cols[2][len("/system.slice/docker-") : len(cols[2])-len(".scope")])
} else {
continue
}
if err := validateDockerID(id); err != nil {
// We can stop searching at this point, the CPU
// subsystem should only occur once, and its cgroup is
// not docker or not a format we accept.
return "", err
}
return id, nil
}
return "", ErrDockerNotFound
}
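// Example (illustrative): a native-driver cgroup line such as
//
//	4:cpu:/docker/0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef
//
// yields the 64-character hex ID following "/docker/". (The ID here is a
// made-up placeholder of the correct length.)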
func isCPUCol(col []byte) bool {
// Sometimes we have multiple subsystems in one line, as in this example
// from:
// https://source.datanerd.us/newrelic/cross_agent_tests/blob/master/docker_container_id/docker-1.1.2-native-driver-systemd.txt
//
// 3:cpuacct,cpu:/system.slice/docker-67f98c9e6188f9c1818672a15dbe46237b6ee7e77f834d40d41c5fb3c2f84a2f.scope
splitCSV := func(r rune) bool { return r == ',' }
subsysCPU := []byte("cpu")
for _, subsys := range bytes.FieldsFunc(col, splitCSV) {
if bytes.Equal(subsysCPU, subsys) {
return true
}
}
return false
}
func validateDockerID(id string) error {
if !dockerIDRegex.MatchString(id) {
return fmt.Errorf("%s does not match %s",
id, dockerIDRegexRaw)
}
return nil
}

View File

@@ -0,0 +1,10 @@
// +build !linux
package sysinfo
import "os"
// Hostname returns the host name.
func Hostname() (string, error) {
return os.Hostname()
}

View File

@@ -0,0 +1,50 @@
package sysinfo
import (
"os"
"syscall"
)
// Hostname returns the host name.
func Hostname() (string, error) {
// Try the builtin API first, which is designed to match the output of
// /bin/hostname, and fall back to uname(2) if it fails, matching the
// behavior of gethostname(2) as implemented by glibc. On Linux, all of
// these methods should yield the same value because sethostname(2)
// limits the hostname to 64 bytes, the same size as the nodename field
// returned by uname(2). Note that this correspondence does not hold on
// other platforms.
//
// os.Hostname failures should be exceedingly rare; however, some
// systems configure SELinux to deny read access to
// /proc/sys/kernel/hostname (Red Hat's OpenShift platform, for
// example). os.Hostname can also fail if some or all of /proc has been
// hidden via chroot(2) or by manipulation of the current process's
// filesystem namespace via the cgroups APIs. Docker is an example of a
// tool that can configure such an environment.
name, err := os.Hostname()
if err == nil {
return name, nil
}
var uts syscall.Utsname
if err2 := syscall.Uname(&uts); err2 != nil {
// The man page documents only one possible error for uname(2),
// suggesting that as long as the buffer given is valid, the
// call will never fail. Return the original error in the hope
// it provides more relevant information about why the hostname
// can't be retrieved.
return "", err
}
// Convert Nodename to a Go string.
buf := make([]byte, 0, len(uts.Nodename))
for _, c := range uts.Nodename {
if c == 0 {
break
}
buf = append(buf, byte(c))
}
return string(buf), nil
}

View File

@@ -0,0 +1,40 @@
package sysinfo
import (
"bufio"
"errors"
"io"
"regexp"
"strconv"
)
// BytesToMebibytes converts bytes into mebibytes.
func BytesToMebibytes(bts uint64) uint64 {
return bts / (1024 * 1024)
}
var (
meminfoRe = regexp.MustCompile(`^MemTotal:\s+([0-9]+)\s+[kK]B$`)
errMemTotalNotFound = errors.New("supported MemTotal not found in /proc/meminfo")
)
// parseProcMeminfo is used to parse Linux's "/proc/meminfo". It is located
// here so that the relevant cross agent tests will be run on all platforms.
func parseProcMeminfo(f io.Reader) (uint64, error) {
scanner := bufio.NewScanner(f)
for scanner.Scan() {
if m := meminfoRe.FindSubmatch(scanner.Bytes()); m != nil {
kb, err := strconv.ParseUint(string(m[1]), 10, 64)
if err != nil {
return 0, err
}
return kb * 1024, nil
}
}
err := scanner.Err()
if err == nil {
err = errMemTotalNotFound
}
return 0, err
}
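// Example (illustrative): a /proc/meminfo line such as
//
//	MemTotal:       16384000 kB
//
// causes parseProcMeminfo to return 16384000 * 1024 bytes.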

View File

@@ -0,0 +1,29 @@
package sysinfo
import (
"syscall"
"unsafe"
)
// PhysicalMemoryBytes returns the total amount of host memory.
func PhysicalMemoryBytes() (uint64, error) {
mib := []int32{6 /* CTL_HW */, 24 /* HW_MEMSIZE */}
buf := make([]byte, 8)
bufLen := uintptr(8)
_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)),
uintptr(0), uintptr(0))
if e1 != 0 {
return 0, e1
}
if bufLen != 8 {
return 0, syscall.EIO
}
return *(*uint64)(unsafe.Pointer(&buf[0])), nil
}

View File

@@ -0,0 +1,32 @@
package sysinfo
import (
"syscall"
"unsafe"
)
// PhysicalMemoryBytes returns the total amount of host memory.
func PhysicalMemoryBytes() (uint64, error) {
mib := []int32{6 /* CTL_HW */, 5 /* HW_PHYSMEM */}
buf := make([]byte, 8)
bufLen := uintptr(8)
_, _, e1 := syscall.Syscall6(syscall.SYS___SYSCTL,
uintptr(unsafe.Pointer(&mib[0])), uintptr(len(mib)),
uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&bufLen)),
uintptr(0), uintptr(0))
if e1 != 0 {
return 0, e1
}
switch bufLen {
case 4:
return uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), nil
case 8:
return *(*uint64)(unsafe.Pointer(&buf[0])), nil
default:
return 0, syscall.EIO
}
}

View File

@@ -0,0 +1,14 @@
package sysinfo
import "os"
// PhysicalMemoryBytes returns the total amount of host memory.
func PhysicalMemoryBytes() (uint64, error) {
f, err := os.Open("/proc/meminfo")
if err != nil {
return 0, err
}
defer f.Close()
return parseProcMeminfo(f)
}

View File

@@ -0,0 +1,26 @@
package sysinfo
/*
#include <unistd.h>
*/
import "C"
// PhysicalMemoryBytes returns the total amount of host memory.
func PhysicalMemoryBytes() (uint64, error) {
// The function we're calling on Solaris is
// long sysconf(int name);
var pages C.long
var pagesizeBytes C.long
var err error
pagesizeBytes, err = C.sysconf(C._SC_PAGE_SIZE)
if pagesizeBytes < 1 {
return 0, err
}
pages, err = C.sysconf(C._SC_PHYS_PAGES)
if pages < 1 {
return 0, err
}
return uint64(pages) * uint64(pagesizeBytes), nil
}

View File

@@ -0,0 +1,23 @@
package sysinfo
import (
"syscall"
"unsafe"
)
// PhysicalMemoryBytes returns the total amount of host memory.
func PhysicalMemoryBytes() (uint64, error) {
// https://msdn.microsoft.com/en-us/library/windows/desktop/cc300158(v=vs.85).aspx
// http://stackoverflow.com/questions/30743070/query-total-physical-memory-in-windows-with-golang
mod := syscall.NewLazyDLL("kernel32.dll")
proc := mod.NewProc("GetPhysicallyInstalledSystemMemory")
var memkb uint64
ret, _, err := proc.Call(uintptr(unsafe.Pointer(&memkb)))
// The return value is TRUE (1) on success and FALSE (0) on failure.
if ret != 1 {
return 0, err
}
return memkb * 1024, nil
}

View File

@@ -0,0 +1,11 @@
package sysinfo
import (
"time"
)
// Usage contains process CPU times.
type Usage struct {
System time.Duration
User time.Duration
}

View File

@@ -0,0 +1,26 @@
// +build !windows
package sysinfo
import (
"syscall"
"time"
)
func timevalToDuration(tv syscall.Timeval) time.Duration {
return time.Duration(tv.Nano()) * time.Nanosecond
}
// GetUsage gathers process times.
func GetUsage() (Usage, error) {
ru := syscall.Rusage{}
err := syscall.Getrusage(syscall.RUSAGE_SELF, &ru)
if err != nil {
return Usage{}, err
}
return Usage{
System: timevalToDuration(ru.Stime),
User: timevalToDuration(ru.Utime),
}, nil
}

View File

@@ -0,0 +1,34 @@
package sysinfo
import (
"syscall"
"time"
)
func filetimeToDuration(ft *syscall.Filetime) time.Duration {
ns := ft.Nanoseconds()
return time.Duration(ns)
}
// GetUsage gathers process times.
func GetUsage() (Usage, error) {
var creationTime syscall.Filetime
var exitTime syscall.Filetime
var kernelTime syscall.Filetime
var userTime syscall.Filetime
handle, err := syscall.GetCurrentProcess()
if err != nil {
return Usage{}, err
}
err = syscall.GetProcessTimes(handle, &creationTime, &exitTime, &kernelTime, &userTime)
if err != nil {
return Usage{}, err
}
return Usage{
System: filetimeToDuration(&kernelTime),
User: filetimeToDuration(&userTime),
}, nil
}

413
vendor/github.com/newrelic/go-agent/internal/tracing.go generated vendored Normal file
View File

@@ -0,0 +1,413 @@
package internal
import (
"fmt"
"net/url"
"time"
"github.com/newrelic/go-agent/internal/sysinfo"
)
type segmentStamp uint64
type segmentTime struct {
Stamp segmentStamp
Time time.Time
}
// SegmentStartTime is embedded into the top-level segments (rather than
// segmentTime) to keep the structure sizes small and minimize allocations.
type SegmentStartTime struct {
Stamp segmentStamp
Depth int
}
type segmentFrame struct {
segmentTime
children time.Duration
}
type segmentEnd struct {
valid bool
start segmentTime
stop segmentTime
duration time.Duration
exclusive time.Duration
}
// Tracer tracks segments.
type Tracer struct {
finishedChildren time.Duration
stamp segmentStamp
currentDepth int
stack []segmentFrame
customSegments map[string]*metricData
datastoreSegments map[DatastoreMetricKey]*metricData
externalSegments map[externalMetricKey]*metricData
DatastoreExternalTotals
TxnTrace
SlowQueriesEnabled bool
SlowQueryThreshold time.Duration
SlowQueries *slowQueries
}
const (
startingStackDepthAlloc = 128
datastoreProductUnknown = "Unknown"
datastoreOperationUnknown = "other"
)
func (t *Tracer) time(now time.Time) segmentTime {
// Update the stamp before using it so that a 0 stamp can be special.
t.stamp++
return segmentTime{
Time: now,
Stamp: t.stamp,
}
}
// TracerRootChildren is used to calculate a transaction's exclusive duration.
func TracerRootChildren(t *Tracer) time.Duration {
var lostChildren time.Duration
for i := 0; i < t.currentDepth; i++ {
lostChildren += t.stack[i].children
}
return t.finishedChildren + lostChildren
}
// StartSegment begins a segment.
func StartSegment(t *Tracer, now time.Time) SegmentStartTime {
if nil == t.stack {
t.stack = make([]segmentFrame, startingStackDepthAlloc)
}
if cap(t.stack) == t.currentDepth {
newLimit := 2 * t.currentDepth
newStack := make([]segmentFrame, newLimit)
copy(newStack, t.stack)
t.stack = newStack
}
tm := t.time(now)
depth := t.currentDepth
t.currentDepth++
t.stack[depth].children = 0
t.stack[depth].segmentTime = tm
return SegmentStartTime{
Stamp: tm.Stamp,
Depth: depth,
}
}
func endSegment(t *Tracer, start SegmentStartTime, now time.Time) segmentEnd {
var s segmentEnd
if 0 == start.Stamp {
return s
}
if start.Depth >= t.currentDepth {
return s
}
if start.Depth < 0 {
return s
}
if start.Stamp != t.stack[start.Depth].Stamp {
return s
}
var children time.Duration
for i := start.Depth; i < t.currentDepth; i++ {
children += t.stack[i].children
}
s.valid = true
s.stop = t.time(now)
s.start = t.stack[start.Depth].segmentTime
if s.stop.Time.After(s.start.Time) {
s.duration = s.stop.Time.Sub(s.start.Time)
}
if s.duration > children {
s.exclusive = s.duration - children
}
// Note that we expect (depth == (t.currentDepth - 1)). However, if
// (depth < (t.currentDepth - 1)), that's OK: a panic may have popped
// some stack frames (and the consumer was not using defer).
t.currentDepth = start.Depth
if 0 == t.currentDepth {
t.finishedChildren += s.duration
} else {
t.stack[t.currentDepth-1].children += s.duration
}
return s
}
// EndBasicSegment ends a basic segment.
func EndBasicSegment(t *Tracer, start SegmentStartTime, now time.Time, name string) {
end := endSegment(t, start, now)
if !end.valid {
return
}
if nil == t.customSegments {
t.customSegments = make(map[string]*metricData)
}
m := metricDataFromDuration(end.duration, end.exclusive)
if data, ok := t.customSegments[name]; ok {
data.aggregate(m)
} else {
// Use `new` in place of &m so that m is not
// automatically moved to the heap.
cpy := new(metricData)
*cpy = m
t.customSegments[name] = cpy
}
if t.TxnTrace.considerNode(end) {
t.TxnTrace.witnessNode(end, customSegmentMetric(name), nil)
}
}
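// Example (illustrative): the expected pairing of StartSegment and
// EndBasicSegment, with times supplied by the caller ("myOperation" is a
// hypothetical segment name):
//
//	tr := &Tracer{}
//	start := StartSegment(tr, time.Now())
//	// ... traced work ...
//	EndBasicSegment(tr, start, time.Now(), "myOperation")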
// EndExternalSegment ends an external segment.
func EndExternalSegment(t *Tracer, start SegmentStartTime, now time.Time, u *url.URL) {
end := endSegment(t, start, now)
if !end.valid {
return
}
host := HostFromURL(u)
if "" == host {
host = "unknown"
}
key := externalMetricKey{
Host: host,
ExternalCrossProcessID: "",
ExternalTransactionName: "",
}
if nil == t.externalSegments {
t.externalSegments = make(map[externalMetricKey]*metricData)
}
t.externalCallCount++
t.externalDuration += end.duration
m := metricDataFromDuration(end.duration, end.exclusive)
if data, ok := t.externalSegments[key]; ok {
data.aggregate(m)
} else {
// Use `new` in place of &m so that m is not
// automatically moved to the heap.
cpy := new(metricData)
*cpy = m
t.externalSegments[key] = cpy
}
if t.TxnTrace.considerNode(end) {
t.TxnTrace.witnessNode(end, externalHostMetric(key), &traceNodeParams{
CleanURL: SafeURL(u),
})
}
}
// EndDatastoreParams contains the parameters for EndDatastoreSegment.
type EndDatastoreParams struct {
Tracer *Tracer
Start SegmentStartTime
Now time.Time
Product string
Collection string
Operation string
ParameterizedQuery string
QueryParameters map[string]interface{}
Host string
PortPathOrID string
Database string
}
const (
unknownDatastoreHost = "unknown"
unknownDatastorePortPathOrID = "unknown"
)
var (
// ThisHost is the system hostname.
ThisHost = func() string {
if h, err := sysinfo.Hostname(); nil == err {
return h
}
return unknownDatastoreHost
}()
hostsToReplace = map[string]struct{}{
"localhost": struct{}{},
"127.0.0.1": struct{}{},
"0.0.0.0": struct{}{},
"0:0:0:0:0:0:0:1": struct{}{},
"::1": struct{}{},
"0:0:0:0:0:0:0:0": struct{}{},
"::": struct{}{},
}
)
func (t Tracer) slowQueryWorthy(d time.Duration) bool {
return t.SlowQueriesEnabled && (d >= t.SlowQueryThreshold)
}
// EndDatastoreSegment ends a datastore segment.
func EndDatastoreSegment(p EndDatastoreParams) {
end := endSegment(p.Tracer, p.Start, p.Now)
if !end.valid {
return
}
if p.Operation == "" {
p.Operation = datastoreOperationUnknown
}
if p.Product == "" {
p.Product = datastoreProductUnknown
}
if p.Host == "" && p.PortPathOrID != "" {
p.Host = unknownDatastoreHost
}
if p.PortPathOrID == "" && p.Host != "" {
p.PortPathOrID = unknownDatastorePortPathOrID
}
if _, ok := hostsToReplace[p.Host]; ok {
p.Host = ThisHost
}
// We still want to create a slowQuery if the consumer has not provided
// a Query string since the stack trace has value.
if p.ParameterizedQuery == "" {
collection := p.Collection
if "" == collection {
collection = "unknown"
}
p.ParameterizedQuery = fmt.Sprintf(`'%s' on '%s' using '%s'`,
p.Operation, collection, p.Product)
}
key := DatastoreMetricKey{
Product: p.Product,
Collection: p.Collection,
Operation: p.Operation,
Host: p.Host,
PortPathOrID: p.PortPathOrID,
}
if nil == p.Tracer.datastoreSegments {
p.Tracer.datastoreSegments = make(map[DatastoreMetricKey]*metricData)
}
p.Tracer.datastoreCallCount++
p.Tracer.datastoreDuration += end.duration
m := metricDataFromDuration(end.duration, end.exclusive)
if data, ok := p.Tracer.datastoreSegments[key]; ok {
data.aggregate(m)
} else {
// Use `new` in place of &m so that m is not
// automatically moved to the heap.
cpy := new(metricData)
*cpy = m
p.Tracer.datastoreSegments[key] = cpy
}
scopedMetric := datastoreScopedMetric(key)
queryParams := vetQueryParameters(p.QueryParameters)
if p.Tracer.TxnTrace.considerNode(end) {
p.Tracer.TxnTrace.witnessNode(end, scopedMetric, &traceNodeParams{
Host: p.Host,
PortPathOrID: p.PortPathOrID,
Database: p.Database,
Query: p.ParameterizedQuery,
queryParameters: queryParams,
})
}
if p.Tracer.slowQueryWorthy(end.duration) {
if nil == p.Tracer.SlowQueries {
p.Tracer.SlowQueries = newSlowQueries(maxTxnSlowQueries)
}
// Frames to skip:
// this function
// endDatastore
// DatastoreSegment.End
skipFrames := 3
p.Tracer.SlowQueries.observeInstance(slowQueryInstance{
Duration: end.duration,
DatastoreMetric: scopedMetric,
ParameterizedQuery: p.ParameterizedQuery,
QueryParameters: queryParams,
Host: p.Host,
PortPathOrID: p.PortPathOrID,
DatabaseName: p.Database,
StackTrace: GetStackTrace(skipFrames),
})
}
}
// MergeBreakdownMetrics creates segment metrics.
func MergeBreakdownMetrics(t *Tracer, metrics *metricTable, scope string, isWeb bool) {
// Custom Segment Metrics
for key, data := range t.customSegments {
name := customSegmentMetric(key)
// Unscoped
metrics.add(name, "", *data, unforced)
// Scoped
metrics.add(name, scope, *data, unforced)
}
// External Segment Metrics
for key, data := range t.externalSegments {
metrics.add(externalAll, "", *data, forced)
if isWeb {
metrics.add(externalWeb, "", *data, forced)
} else {
metrics.add(externalOther, "", *data, forced)
}
hostMetric := externalHostMetric(key)
metrics.add(hostMetric, "", *data, unforced)
if "" != key.ExternalCrossProcessID && "" != key.ExternalTransactionName {
txnMetric := externalTransactionMetric(key)
// Unscoped CAT metrics
metrics.add(externalAppMetric(key), "", *data, unforced)
metrics.add(txnMetric, "", *data, unforced)
// Scoped External Metric
metrics.add(txnMetric, scope, *data, unforced)
} else {
// Scoped External Metric
metrics.add(hostMetric, scope, *data, unforced)
}
}
// Datastore Segment Metrics
for key, data := range t.datastoreSegments {
metrics.add(datastoreAll, "", *data, forced)
product := datastoreProductMetric(key)
metrics.add(product.All, "", *data, forced)
if isWeb {
metrics.add(datastoreWeb, "", *data, forced)
metrics.add(product.Web, "", *data, forced)
} else {
metrics.add(datastoreOther, "", *data, forced)
metrics.add(product.Other, "", *data, forced)
}
if key.Host != "" && key.PortPathOrID != "" {
instance := datastoreInstanceMetric(key)
metrics.add(instance, "", *data, unforced)
}
operation := datastoreOperationMetric(key)
metrics.add(operation, "", *data, unforced)
if "" != key.Collection {
statement := datastoreStatementMetric(key)
metrics.add(statement, "", *data, unforced)
metrics.add(statement, scope, *data, unforced)
} else {
metrics.add(operation, scope, *data, unforced)
}
}
}

View File

@@ -0,0 +1,97 @@
package internal
import (
"bytes"
"math/rand"
"time"
)
// DatastoreExternalTotals contains an overview of external and datastore calls
// made during a transaction.
type DatastoreExternalTotals struct {
externalCallCount uint64
externalDuration time.Duration
datastoreCallCount uint64
datastoreDuration time.Duration
}
// TxnEvent represents a transaction.
// https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Events-PORTED.md
// https://newrelic.atlassian.net/wiki/display/eng/Agent+Support+for+Synthetics%3A+Forced+Transaction+Traces+and+Analytic+Events
type TxnEvent struct {
Name string
Timestamp time.Time
Duration time.Duration
Queuing time.Duration
Zone ApdexZone
Attrs *Attributes
DatastoreExternalTotals
}
// WriteJSON prepares JSON in the format expected by the collector.
func (e *TxnEvent) WriteJSON(buf *bytes.Buffer) {
w := jsonFieldsWriter{buf: buf}
buf.WriteByte('[')
buf.WriteByte('{')
w.stringField("type", "Transaction")
w.stringField("name", e.Name)
w.floatField("timestamp", timeToFloatSeconds(e.Timestamp))
w.floatField("duration", e.Duration.Seconds())
if ApdexNone != e.Zone {
w.stringField("nr.apdexPerfZone", e.Zone.label())
}
if e.Queuing > 0 {
w.floatField("queueDuration", e.Queuing.Seconds())
}
if e.externalCallCount > 0 {
w.intField("externalCallCount", int64(e.externalCallCount))
w.floatField("externalDuration", e.externalDuration.Seconds())
}
if e.datastoreCallCount > 0 {
// Note that "database" is used for the keys here instead of
// "datastore" for historical reasons.
w.intField("databaseCallCount", int64(e.datastoreCallCount))
w.floatField("databaseDuration", e.datastoreDuration.Seconds())
}
buf.WriteByte('}')
buf.WriteByte(',')
userAttributesJSON(e.Attrs, buf, destTxnEvent)
buf.WriteByte(',')
agentAttributesJSON(e.Attrs, buf, destTxnEvent)
buf.WriteByte(']')
}
// MarshalJSON is used for testing.
func (e *TxnEvent) MarshalJSON() ([]byte, error) {
buf := bytes.NewBuffer(make([]byte, 0, 256))
e.WriteJSON(buf)
return buf.Bytes(), nil
}
type txnEvents struct {
events *analyticsEvents
}
func newTxnEvents(max int) *txnEvents {
return &txnEvents{
events: newAnalyticsEvents(max),
}
}
func (events *txnEvents) AddTxnEvent(e *TxnEvent) {
stamp := eventStamp(rand.Float32())
events.events.addEvent(analyticsEvent{stamp, e})
}
func (events *txnEvents) MergeIntoHarvest(h *Harvest) {
h.TxnEvents.events.mergeFailed(events.events)
}
func (events *txnEvents) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
return events.events.CollectorJSON(agentRunID)
}
func (events *txnEvents) numSeen() float64 { return events.events.NumSeen() }
func (events *txnEvents) numSaved() float64 { return events.events.NumSaved() }

View File

@@ -0,0 +1,315 @@
package internal
import (
"bytes"
"container/heap"
"encoding/json"
"sort"
"time"
"github.com/newrelic/go-agent/internal/jsonx"
)
// See https://source.datanerd.us/agents/agent-specs/blob/master/Transaction-Trace-LEGACY.md
type traceNodeHeap []traceNode
// traceNodeParams is used for trace node parameters. A struct is used in place
// of a map[string]interface{} to facilitate testing and reduce JSON Marshal
// overhead. If too many fields get added here, it probably makes sense to
// start using a map. This struct is not embedded into traceNode to minimize
// the size of traceNode: Not all nodes will have parameters.
type traceNodeParams struct {
StackTrace *StackTrace
CleanURL string
Database string
Host string
PortPathOrID string
Query string
queryParameters queryParameters
}
func (p *traceNodeParams) WriteJSON(buf *bytes.Buffer) {
w := jsonFieldsWriter{buf: buf}
buf.WriteByte('{')
if nil != p.StackTrace {
w.writerField("backtrace", p.StackTrace)
}
if "" != p.CleanURL {
w.stringField("uri", p.CleanURL)
}
if "" != p.Database {
w.stringField("database_name", p.Database)
}
if "" != p.Host {
w.stringField("host", p.Host)
}
if "" != p.PortPathOrID {
w.stringField("port_path_or_id", p.PortPathOrID)
}
if "" != p.Query {
w.stringField("query", p.Query)
}
if nil != p.queryParameters {
w.writerField("query_parameters", p.queryParameters)
}
buf.WriteByte('}')
}
// MarshalJSON is used for testing.
func (p *traceNodeParams) MarshalJSON() ([]byte, error) {
buf := &bytes.Buffer{}
p.WriteJSON(buf)
return buf.Bytes(), nil
}
type traceNode struct {
start segmentTime
stop segmentTime
duration time.Duration
params *traceNodeParams
name string
}
func (h traceNodeHeap) Len() int { return len(h) }
func (h traceNodeHeap) Less(i, j int) bool { return h[i].duration < h[j].duration }
func (h traceNodeHeap) Swap(i, j int) { h[i], h[j] = h[j], h[i] }
// Push and Pop are unused: only heap.Init and heap.Fix are used.
func (h traceNodeHeap) Push(x interface{}) {}
func (h traceNodeHeap) Pop() interface{} { return nil }
// TxnTrace contains the work in progress transaction trace.
type TxnTrace struct {
Enabled bool
SegmentThreshold time.Duration
StackTraceThreshold time.Duration
nodes traceNodeHeap
maxNodes int
}
// considerNode exists to prevent unnecessary calls to witnessNode: constructing
// the metric name and params map requires allocations.
func (trace *TxnTrace) considerNode(end segmentEnd) bool {
return trace.Enabled && (end.duration >= trace.SegmentThreshold)
}
func (trace *TxnTrace) witnessNode(end segmentEnd, name string, params *traceNodeParams) {
node := traceNode{
start: end.start,
stop: end.stop,
duration: end.duration,
name: name,
params: params,
}
if !trace.considerNode(end) {
return
}
if trace.nodes == nil {
max := trace.maxNodes
if 0 == max {
max = maxTxnTraceNodes
}
trace.nodes = make(traceNodeHeap, 0, max)
}
if end.exclusive >= trace.StackTraceThreshold {
if node.params == nil {
p := new(traceNodeParams)
node.params = p
}
// skip the following stack frames:
// this method
// function in tracing.go (EndBasicSegment, EndExternalSegment, EndDatastoreSegment)
// function in internal_txn.go (endSegment, endExternal, endDatastore)
// segment end method
skip := 4
node.params.StackTrace = GetStackTrace(skip)
}
if len(trace.nodes) < cap(trace.nodes) {
trace.nodes = append(trace.nodes, node)
if len(trace.nodes) == cap(trace.nodes) {
heap.Init(trace.nodes)
}
return
}
if node.duration <= trace.nodes[0].duration {
return
}
trace.nodes[0] = node
heap.Fix(trace.nodes, 0)
}
// HarvestTrace contains a finished transaction trace ready for serialization to
// the collector.
type HarvestTrace struct {
Start time.Time
Duration time.Duration
MetricName string
CleanURL string
Trace TxnTrace
ForcePersist bool
GUID string
SyntheticsResourceID string
Attrs *Attributes
}
type nodeDetails struct {
name string
relativeStart time.Duration
relativeStop time.Duration
params *traceNodeParams
}
func printNodeStart(buf *bytes.Buffer, n nodeDetails) {
// time.Seconds() is intentionally not used here. Millisecond
// precision is enough.
relativeStartMillis := n.relativeStart.Nanoseconds() / (1000 * 1000)
relativeStopMillis := n.relativeStop.Nanoseconds() / (1000 * 1000)
buf.WriteByte('[')
jsonx.AppendInt(buf, relativeStartMillis)
buf.WriteByte(',')
jsonx.AppendInt(buf, relativeStopMillis)
buf.WriteByte(',')
jsonx.AppendString(buf, n.name)
buf.WriteByte(',')
if nil == n.params {
buf.WriteString("{}")
} else {
n.params.WriteJSON(buf)
}
buf.WriteByte(',')
buf.WriteByte('[')
}
func printChildren(buf *bytes.Buffer, traceStart time.Time, nodes sortedTraceNodes, next int, stop segmentStamp) int {
firstChild := true
for next < len(nodes) && nodes[next].start.Stamp < stop {
if firstChild {
firstChild = false
} else {
buf.WriteByte(',')
}
printNodeStart(buf, nodeDetails{
name: nodes[next].name,
relativeStart: nodes[next].start.Time.Sub(traceStart),
relativeStop: nodes[next].stop.Time.Sub(traceStart),
params: nodes[next].params,
})
next = printChildren(buf, traceStart, nodes, next+1, nodes[next].stop.Stamp)
buf.WriteString("]]")
}
return next
}
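// Example (illustrative): given nodes with stamp ranges A(1..6), B(2..3),
// and C(4..5), sorted by start stamp, printChildren nests B and C inside
// A because each child's start stamp falls before its parent's stop
// stamp, producing one outer node with two children.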
type sortedTraceNodes []*traceNode
func (s sortedTraceNodes) Len() int { return len(s) }
func (s sortedTraceNodes) Less(i, j int) bool { return s[i].start.Stamp < s[j].start.Stamp }
func (s sortedTraceNodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func traceDataJSON(trace *HarvestTrace) []byte {
estimate := 100 * len(trace.Trace.nodes)
buf := bytes.NewBuffer(make([]byte, 0, estimate))
nodes := make(sortedTraceNodes, len(trace.Trace.nodes))
for i := 0; i < len(nodes); i++ {
nodes[i] = &trace.Trace.nodes[i]
}
sort.Sort(nodes)
buf.WriteByte('[') // begin trace data
// If the trace string pool is used, insert another array here.
jsonx.AppendFloat(buf, 0.0) // unused timestamp
buf.WriteByte(',') //
buf.WriteString("{}") // unused: formerly request parameters
buf.WriteByte(',') //
buf.WriteString("{}") // unused: formerly custom parameters
buf.WriteByte(',') //
printNodeStart(buf, nodeDetails{ // begin outer root
name: "ROOT",
relativeStart: 0,
relativeStop: trace.Duration,
})
printNodeStart(buf, nodeDetails{ // begin inner root
name: trace.MetricName,
relativeStart: 0,
relativeStop: trace.Duration,
})
if len(nodes) > 0 {
lastStopStamp := nodes[len(nodes)-1].stop.Stamp + 1
printChildren(buf, trace.Start, nodes, 0, lastStopStamp)
}
buf.WriteString("]]") // end outer root
buf.WriteString("]]") // end inner root
buf.WriteByte(',')
buf.WriteByte('{')
buf.WriteString(`"agentAttributes":`)
agentAttributesJSON(trace.Attrs, buf, destTxnTrace)
buf.WriteByte(',')
buf.WriteString(`"userAttributes":`)
userAttributesJSON(trace.Attrs, buf, destTxnTrace)
buf.WriteByte(',')
buf.WriteString(`"intrinsics":{}`) // TODO intrinsics
buf.WriteByte('}')
// If the trace string pool is used, end another array here.
buf.WriteByte(']') // end trace data
return buf.Bytes()
}
// MarshalJSON prepares the trace in the JSON expected by the collector.
func (trace *HarvestTrace) MarshalJSON() ([]byte, error) {
return json.Marshal([]interface{}{
trace.Start.UnixNano() / 1000,
trace.Duration.Seconds() * 1000.0,
trace.MetricName,
trace.CleanURL,
JSONString(traceDataJSON(trace)),
trace.GUID,
nil, // reserved for future use
trace.ForcePersist,
nil, // X-Ray sessions not supported
trace.SyntheticsResourceID,
})
}
type harvestTraces struct {
trace *HarvestTrace
}
func newHarvestTraces() *harvestTraces {
return &harvestTraces{}
}
func (traces *harvestTraces) Witness(trace HarvestTrace) {
if nil == traces.trace || traces.trace.Duration < trace.Duration {
cpy := new(HarvestTrace)
*cpy = trace
traces.trace = cpy
}
}
func (traces *harvestTraces) Data(agentRunID string, harvestStart time.Time) ([]byte, error) {
if nil == traces.trace {
return nil, nil
}
return json.Marshal([]interface{}{
agentRunID,
[]interface{}{
traces.trace,
},
})
}
func (traces *harvestTraces) MergeIntoHarvest(h *Harvest) {}

43
vendor/github.com/newrelic/go-agent/internal/url.go generated vendored Normal file
View File

@@ -0,0 +1,43 @@
package internal
import "net/url"
// SafeURL removes sensitive information from a URL.
func SafeURL(u *url.URL) string {
if nil == u {
return ""
}
if "" != u.Opaque {
// If the URL is opaque, we cannot be sure if it contains
// sensitive information.
return ""
}
// Omit user, query, and fragment information for security.
ur := url.URL{
Scheme: u.Scheme,
Host: u.Host,
Path: u.Path,
}
return ur.String()
}
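// Example (illustrative):
//
//	u, _ := url.Parse("https://user:pass@example.com/path?token=secret#frag")
//	SafeURL(u) // "https://example.com/path"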
// SafeURLFromString removes sensitive information from a URL.
func SafeURLFromString(rawurl string) string {
u, err := url.Parse(rawurl)
if nil != err {
return ""
}
return SafeURL(u)
}
// HostFromURL returns the URL's host.
func HostFromURL(u *url.URL) string {
if nil == u {
return ""
}
if "" != u.Opaque {
return "opaque"
}
return u.Host
}

View File

@@ -0,0 +1,80 @@
package internal
import (
"bytes"
"encoding/json"
"strings"
"time"
)
// JSONString assists in logging JSON: Based on the formatter used to log
// Context contents, the contents could be marshalled as JSON or just printed
// directly.
type JSONString string
// MarshalJSON returns the JSONString unmodified without any escaping.
func (js JSONString) MarshalJSON() ([]byte, error) {
if "" == js {
return []byte("null"), nil
}
return []byte(js), nil
}
func removeFirstSegment(name string) string {
idx := strings.Index(name, "/")
if -1 == idx {
return name
}
return name[idx+1:]
}
func timeToFloatSeconds(t time.Time) float64 {
return float64(t.UnixNano()) / float64(1000*1000*1000)
}
func timeToFloatMilliseconds(t time.Time) float64 {
return float64(t.UnixNano()) / float64(1000*1000)
}
func floatSecondsToDuration(seconds float64) time.Duration {
nanos := seconds * 1000 * 1000 * 1000
return time.Duration(nanos) * time.Nanosecond
}
func absTimeDiff(t1, t2 time.Time) time.Duration {
if t1.After(t2) {
return t1.Sub(t2)
}
return t2.Sub(t1)
}
func compactJSON(js []byte) []byte {
buf := new(bytes.Buffer)
if err := json.Compact(buf, js); err != nil {
return nil
}
return buf.Bytes()
}
// CompactJSONString removes the whitespace from a JSON string.
func CompactJSONString(js string) string {
out := compactJSON([]byte(js))
return string(out)
}
// StringLengthByteLimit truncates strings using a byte-limit boundary and
// avoids terminating in the middle of a multibyte character.
func StringLengthByteLimit(str string, byteLimit int) string {
if len(str) <= byteLimit {
return str
}
limitIndex := 0
for pos := range str {
if pos > byteLimit {
break
}
limitIndex = pos
}
return str[0:limitIndex]
}
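// Example (illustrative): the limit lands on a rune boundary, so a
// multibyte character is dropped rather than split:
//
//	StringLengthByteLimit("abcdef", 3) // "abc"
//	StringLengthByteLimit("héllo", 2)  // "h" (the two-byte 'é' is not split)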

View File

@@ -0,0 +1,121 @@
package utilization
import (
"fmt"
"io"
"net/http"
"time"
)
const (
maxResponseLengthBytes = 255
// AWS data gathering requires making three web requests; therefore, this
// timeout is in keeping with the spec's total timeout of 1 second.
individualConnectionTimeout = 300 * time.Millisecond
)
const (
awsHost = "169.254.169.254"
typeEndpointPath = "/2008-02-01/meta-data/instance-type"
idEndpointPath = "/2008-02-01/meta-data/instance-id"
zoneEndpointPath = "/2008-02-01/meta-data/placement/availability-zone"
typeEndpoint = "http://" + awsHost + typeEndpointPath
idEndpoint = "http://" + awsHost + idEndpointPath
zoneEndpoint = "http://" + awsHost + zoneEndpointPath
)
// awsValidationError represents a response from an AWS endpoint that doesn't
// match the format expectations.
type awsValidationError struct {
e error
}
func (a awsValidationError) Error() string {
return a.e.Error()
}
func isAWSValidationError(e error) bool {
_, is := e.(awsValidationError)
return is
}
func getAWS() (*vendor, error) {
return getEndpoints(&http.Client{
Timeout: individualConnectionTimeout,
})
}
func getEndpoints(client *http.Client) (*vendor, error) {
v := &vendor{}
var err error
v.ID, err = getAndValidate(client, idEndpoint)
if err != nil {
return nil, err
}
v.Type, err = getAndValidate(client, typeEndpoint)
if err != nil {
return nil, err
}
v.Zone, err = getAndValidate(client, zoneEndpoint)
if err != nil {
return nil, err
}
return v, nil
}
func getAndValidate(client *http.Client, endpoint string) (string, error) {
response, err := client.Get(endpoint)
if err != nil {
return "", err
}
defer response.Body.Close()
if response.StatusCode != 200 {
return "", fmt.Errorf("unexpected response code %d", response.StatusCode)
}
b := make([]byte, maxResponseLengthBytes+1)
num, err := response.Body.Read(b)
if err != nil && err != io.EOF {
return "", err
}
if num > maxResponseLengthBytes {
return "", awsValidationError{
fmt.Errorf("maximum length %d exceeded", maxResponseLengthBytes),
}
}
responseText := string(b[:num])
for _, r := range responseText {
if !isAcceptableRune(r) {
return "", awsValidationError{
fmt.Errorf("invalid character %x", r),
}
}
}
return responseText, nil
}
// See:
// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md#normalizing-aws-data
func isAcceptableRune(r rune) bool {
switch r {
case 0xFFFD:
return false
case '_', ' ', '/', '.', '-':
return true
default:
return r > 0x7f ||
('0' <= r && r <= '9') ||
('a' <= r && r <= 'z') ||
('A' <= r && r <= 'Z')
}
}

View File

@@ -0,0 +1,140 @@
// Package utilization implements the Utilization spec, available at
// https://source.datanerd.us/agents/agent-specs/blob/master/Utilization.md
package utilization
import (
"runtime"
"github.com/newrelic/go-agent/internal/logger"
"github.com/newrelic/go-agent/internal/sysinfo"
)
const metadataVersion = 2
// Config controls the behavior of utilization information capture.
type Config struct {
DetectAWS bool
DetectDocker bool
LogicalProcessors int
TotalRAMMIB int
BillingHostname string
}
type override struct {
LogicalProcessors *int `json:"logical_processors,omitempty"`
TotalRAMMIB *int `json:"total_ram_mib,omitempty"`
BillingHostname string `json:"hostname,omitempty"`
}
// Data contains utilization system information.
type Data struct {
MetadataVersion int `json:"metadata_version"`
LogicalProcessors int `json:"logical_processors"`
RAMMib *uint64 `json:"total_ram_mib"`
Hostname string `json:"hostname"`
Vendors *vendors `json:"vendors,omitempty"`
Config *override `json:"config,omitempty"`
}
var (
sampleRAMMib = uint64(1024)
// SampleData contains sample utilization data useful for testing.
SampleData = Data{
MetadataVersion: metadataVersion,
LogicalProcessors: 16,
RAMMib: &sampleRAMMib,
Hostname: "my-hostname",
}
)
type vendor struct {
ID string `json:"id,omitempty"`
Type string `json:"type,omitempty"`
Zone string `json:"zone,omitempty"`
}
type vendors struct {
AWS *vendor `json:"aws,omitempty"`
Docker *vendor `json:"docker,omitempty"`
}
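// Example (illustrative): SampleData above marshals to JSON of the form
//
//	{"metadata_version":2,"logical_processors":16,"total_ram_mib":1024,"hostname":"my-hostname"}
//
// with the vendors and config objects omitted because they are nil.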
func overrideFromConfig(config Config) *override {
ov := &override{}
if 0 != config.LogicalProcessors {
x := config.LogicalProcessors
ov.LogicalProcessors = &x
}
if 0 != config.TotalRAMMIB {
x := config.TotalRAMMIB
ov.TotalRAMMIB = &x
}
ov.BillingHostname = config.BillingHostname
if "" == ov.BillingHostname &&
nil == ov.LogicalProcessors &&
nil == ov.TotalRAMMIB {
ov = nil
}
return ov
}
// Gather gathers system utilization data.
func Gather(config Config, lg logger.Logger) *Data {
uDat := Data{
MetadataVersion: metadataVersion,
Vendors: &vendors{},
LogicalProcessors: runtime.NumCPU(),
}
if config.DetectDocker {
id, err := sysinfo.DockerID()
if err != nil &&
err != sysinfo.ErrDockerUnsupported &&
err != sysinfo.ErrDockerNotFound {
lg.Warn("error gathering Docker information", map[string]interface{}{
"error": err.Error(),
})
} else if id != "" {
uDat.Vendors.Docker = &vendor{ID: id}
}
}
if config.DetectAWS {
aws, err := getAWS()
if nil == err {
uDat.Vendors.AWS = aws
} else if isAWSValidationError(err) {
lg.Warn("AWS validation error", map[string]interface{}{
"error": err.Error(),
})
}
}
if uDat.Vendors.AWS == nil && uDat.Vendors.Docker == nil {
uDat.Vendors = nil
}
host, err := sysinfo.Hostname()
if nil == err {
uDat.Hostname = host
} else {
lg.Warn("error getting hostname", map[string]interface{}{
"error": err.Error(),
})
}
bts, err := sysinfo.PhysicalMemoryBytes()
if nil == err {
mib := sysinfo.BytesToMebibytes(bts)
uDat.RAMMib = &mib
} else {
lg.Warn("error getting memory", map[string]interface{}{
"error": err.Error(),
})
}
uDat.Config = overrideFromConfig(config)
return &uDat
}

566
vendor/github.com/newrelic/go-agent/internal_app.go generated vendored Normal file
View File

@@ -0,0 +1,566 @@
package newrelic
import (
"errors"
"fmt"
"net/http"
"os"
"strings"
"sync"
"time"
"github.com/newrelic/go-agent/internal"
"github.com/newrelic/go-agent/internal/logger"
)
var (
debugLogging = os.Getenv("NEW_RELIC_DEBUG_LOGGING")
redirectHost = func() string {
if s := os.Getenv("NEW_RELIC_HOST"); "" != s {
return s
}
return "collector.newrelic.com"
}()
)
type dataConsumer interface {
Consume(internal.AgentRunID, internal.Harvestable)
}
type appData struct {
id internal.AgentRunID
data internal.Harvestable
}
type app struct {
config Config
attrConfig *internal.AttributeConfig
rpmControls internal.RpmControls
testHarvest *internal.Harvest
// initiateShutdown is used to tell the processor to shutdown.
initiateShutdown chan struct{}
// shutdownStarted and shutdownComplete are closed by the processor
// goroutine to indicate the shutdown status. Two channels are used so
// that the call of app.Shutdown() can block until shutdown has
// completed but other goroutines can exit when shutdown has started.
// This is not just an optimization: This prevents a deadlock if
// harvesting data during the shutdown fails and an attempt is made to
// merge the data into the next harvest.
shutdownStarted chan struct{}
shutdownComplete chan struct{}
// Sends to these channels should not occur without a <-shutdownStarted
// select option to prevent deadlock.
dataChan chan appData
collectorErrorChan chan error
connectChan chan *internal.AppRun
harvestTicker *time.Ticker
// This mutex protects both `run` and `err`, both of which should only
// be accessed using getState and setState.
sync.RWMutex
// run is non-nil when the app is successfully connected. It is
// immutable.
run *internal.AppRun
// err is non-nil if the application will never be connected again
// (disconnect, license exception, shutdown).
err error
}
var (
placeholderRun = &internal.AppRun{
ConnectReply: internal.ConnectReplyDefaults(),
}
)
func isFatalHarvestError(e error) bool {
return internal.IsDisconnect(e) ||
internal.IsLicenseException(e) ||
internal.IsRestartException(e)
}
func shouldSaveFailedHarvest(e error) bool {
if e == internal.ErrPayloadTooLarge || e == internal.ErrUnsupportedMedia {
return false
}
return true
}
func (app *app) doHarvest(h *internal.Harvest, harvestStart time.Time, run *internal.AppRun) {
h.CreateFinalMetrics()
h.Metrics = h.Metrics.ApplyRules(run.MetricRules)
payloads := h.Payloads()
for cmd, p := range payloads {
data, err := p.Data(run.RunID.String(), harvestStart)
if nil == data && nil == err {
continue
}
if nil == err {
call := internal.RpmCmd{
Collector: run.Collector,
RunID: run.RunID.String(),
Name: cmd,
Data: data,
}
// The reply from harvest calls is always unused.
_, err = internal.CollectorRequest(call, app.rpmControls)
}
if nil == err {
continue
}
if isFatalHarvestError(err) {
select {
case app.collectorErrorChan <- err:
case <-app.shutdownStarted:
}
return
}
app.config.Logger.Warn("harvest failure", map[string]interface{}{
"cmd": cmd,
"error": err.Error(),
})
if shouldSaveFailedHarvest(err) {
app.Consume(run.RunID, p)
}
}
}
func connectAttempt(app *app) (*internal.AppRun, error) {
js, e := configConnectJSON(app.config)
if nil != e {
return nil, e
}
return internal.ConnectAttempt(js, redirectHost, app.rpmControls)
}
func (app *app) connectRoutine() {
for {
run, err := connectAttempt(app)
if nil == err {
select {
case app.connectChan <- run:
case <-app.shutdownStarted:
}
return
}
if internal.IsDisconnect(err) || internal.IsLicenseException(err) {
select {
case app.collectorErrorChan <- err:
case <-app.shutdownStarted:
}
return
}
app.config.Logger.Warn("application connect failure", map[string]interface{}{
"error": err.Error(),
})
time.Sleep(internal.ConnectBackoff)
}
}
func debug(data internal.Harvestable, lg Logger) {
now := time.Now()
h := internal.NewHarvest(now)
data.MergeIntoHarvest(h)
ps := h.Payloads()
for cmd, p := range ps {
d, err := p.Data("agent run id", now)
if nil == d && nil == err {
continue
}
if nil != err {
lg.Debug("integration", map[string]interface{}{
"cmd": cmd,
"error": err.Error(),
})
continue
}
lg.Debug("integration", map[string]interface{}{
"cmd": cmd,
"data": internal.JSONString(d),
})
}
}
func processConnectMessages(run *internal.AppRun, lg Logger) {
for _, msg := range run.Messages {
event := "collector message"
cn := map[string]interface{}{"msg": msg.Message}
switch strings.ToLower(msg.Level) {
case "error":
lg.Error(event, cn)
case "warn":
lg.Warn(event, cn)
case "info":
lg.Info(event, cn)
case "debug", "verbose":
lg.Debug(event, cn)
}
}
}
func (app *app) process() {
// Both the harvest and the run are non-nil when the app is connected,
// and nil otherwise.
var h *internal.Harvest
var run *internal.AppRun
for {
select {
case <-app.harvestTicker.C:
if nil != run {
now := time.Now()
go app.doHarvest(h, now, run)
h = internal.NewHarvest(now)
}
case d := <-app.dataChan:
if nil != run && run.RunID == d.id {
d.data.MergeIntoHarvest(h)
}
case <-app.initiateShutdown:
close(app.shutdownStarted)
// Remove the run before merging any final data to
// ensure a bounded number of receives from dataChan.
app.setState(nil, errors.New("application shut down"))
app.harvestTicker.Stop()
if nil != run {
for done := false; !done; {
select {
case d := <-app.dataChan:
if run.RunID == d.id {
d.data.MergeIntoHarvest(h)
}
default:
done = true
}
}
app.doHarvest(h, time.Now(), run)
}
close(app.shutdownComplete)
return
case err := <-app.collectorErrorChan:
run = nil
h = nil
app.setState(nil, nil)
switch {
case internal.IsDisconnect(err):
app.setState(nil, err)
app.config.Logger.Error("application disconnected by New Relic", map[string]interface{}{
"app": app.config.AppName,
})
case internal.IsLicenseException(err):
app.setState(nil, err)
app.config.Logger.Error("invalid license", map[string]interface{}{
"app": app.config.AppName,
"license": app.config.License,
})
case internal.IsRestartException(err):
app.config.Logger.Info("application restarted", map[string]interface{}{
"app": app.config.AppName,
})
go app.connectRoutine()
}
case run = <-app.connectChan:
h = internal.NewHarvest(time.Now())
app.setState(run, nil)
app.config.Logger.Info("application connected", map[string]interface{}{
"app": app.config.AppName,
"run": run.RunID.String(),
})
processConnectMessages(run, app.config.Logger)
}
}
}
func (app *app) Shutdown(timeout time.Duration) {
if !app.config.Enabled {
return
}
select {
case app.initiateShutdown <- struct{}{}:
default:
}
// Block until shutdown is done or timeout occurs.
t := time.NewTimer(timeout)
select {
case <-app.shutdownComplete:
case <-t.C:
}
t.Stop()
app.config.Logger.Info("application shutdown", map[string]interface{}{
"app": app.config.AppName,
})
}
func convertAttributeDestinationConfig(c AttributeDestinationConfig) internal.AttributeDestinationConfig {
return internal.AttributeDestinationConfig{
Enabled: c.Enabled,
Include: c.Include,
Exclude: c.Exclude,
}
}
func runSampler(app *app, period time.Duration) {
previous := internal.GetSample(time.Now(), app.config.Logger)
t := time.NewTicker(period)
for {
select {
case now := <-t.C:
current := internal.GetSample(now, app.config.Logger)
run, _ := app.getState()
app.Consume(run.RunID, internal.GetStats(internal.Samples{
Previous: previous,
Current: current,
}))
previous = current
case <-app.shutdownStarted:
t.Stop()
return
}
}
}
func (app *app) WaitForConnection(timeout time.Duration) error {
if !app.config.Enabled {
return nil
}
deadline := time.Now().Add(timeout)
pollPeriod := 50 * time.Millisecond
for {
run, err := app.getState()
if nil != err {
return err
}
if run.RunID != "" {
return nil
}
if time.Now().After(deadline) {
return fmt.Errorf("timeout out after %s", timeout.String())
}
time.Sleep(pollPeriod)
}
}
func newApp(c Config) (Application, error) {
c = copyConfigReferenceFields(c)
if err := c.Validate(); nil != err {
return nil, err
}
if nil == c.Logger {
c.Logger = logger.ShimLogger{}
}
app := &app{
config: c,
attrConfig: internal.CreateAttributeConfig(internal.AttributeConfigInput{
Attributes: convertAttributeDestinationConfig(c.Attributes),
ErrorCollector: convertAttributeDestinationConfig(c.ErrorCollector.Attributes),
TransactionEvents: convertAttributeDestinationConfig(c.TransactionEvents.Attributes),
TransactionTracer: convertAttributeDestinationConfig(c.TransactionTracer.Attributes),
}),
// This channel must be buffered since Shutdown makes a
// non-blocking send attempt.
initiateShutdown: make(chan struct{}, 1),
shutdownStarted: make(chan struct{}),
shutdownComplete: make(chan struct{}),
connectChan: make(chan *internal.AppRun, 1),
collectorErrorChan: make(chan error, 1),
dataChan: make(chan appData, internal.AppDataChanSize),
rpmControls: internal.RpmControls{
UseTLS: c.UseTLS,
License: c.License,
Client: &http.Client{
Transport: c.Transport,
Timeout: internal.CollectorTimeout,
},
Logger: c.Logger,
AgentVersion: Version,
},
}
app.config.Logger.Info("application created", map[string]interface{}{
"app": app.config.AppName,
"version": Version,
"enabled": app.config.Enabled,
})
if !app.config.Enabled {
return app, nil
}
app.harvestTicker = time.NewTicker(internal.HarvestPeriod)
go app.process()
go app.connectRoutine()
if app.config.RuntimeSampler.Enabled {
go runSampler(app, internal.RuntimeSamplerPeriod)
}
return app, nil
}
type expectApp interface {
internal.Expect
Application
}
func newTestApp(replyfn func(*internal.ConnectReply), cfg Config) (expectApp, error) {
cfg.Enabled = false
application, err := newApp(cfg)
if nil != err {
return nil, err
}
app := application.(*app)
if nil != replyfn {
reply := internal.ConnectReplyDefaults()
replyfn(reply)
app.setState(&internal.AppRun{ConnectReply: reply}, nil)
}
app.testHarvest = internal.NewHarvest(time.Now())
return app, nil
}
func (app *app) getState() (*internal.AppRun, error) {
app.RLock()
defer app.RUnlock()
run := app.run
if nil == run {
run = placeholderRun
}
return run, app.err
}
func (app *app) setState(run *internal.AppRun, err error) {
app.Lock()
defer app.Unlock()
app.run = run
app.err = err
}
// StartTransaction implements newrelic.Application's StartTransaction.
func (app *app) StartTransaction(name string, w http.ResponseWriter, r *http.Request) Transaction {
run, _ := app.getState()
return upgradeTxn(newTxn(txnInput{
Config: app.config,
Reply: run.ConnectReply,
Request: r,
W: w,
Consumer: app,
attrConfig: app.attrConfig,
}, name))
}
var (
errHighSecurityEnabled = errors.New("high security enabled")
errCustomEventsDisabled = errors.New("custom events disabled")
errCustomEventsRemoteDisabled = errors.New("custom events disabled by server")
)
// RecordCustomEvent implements newrelic.Application's RecordCustomEvent.
func (app *app) RecordCustomEvent(eventType string, params map[string]interface{}) error {
if app.config.HighSecurity {
return errHighSecurityEnabled
}
if !app.config.CustomInsightsEvents.Enabled {
return errCustomEventsDisabled
}
event, e := internal.CreateCustomEvent(eventType, params, time.Now())
if nil != e {
return e
}
run, _ := app.getState()
if !run.CollectCustomEvents {
return errCustomEventsRemoteDisabled
}
app.Consume(run.RunID, event)
return nil
}
func (app *app) Consume(id internal.AgentRunID, data internal.Harvestable) {
if "" != debugLogging {
debug(data, app.config.Logger)
}
if nil != app.testHarvest {
data.MergeIntoHarvest(app.testHarvest)
return
}
if "" == id {
return
}
select {
case app.dataChan <- appData{id, data}:
case <-app.shutdownStarted:
}
}
func (app *app) ExpectCustomEvents(t internal.Validator, want []internal.WantCustomEvent) {
internal.ExpectCustomEvents(internal.ExtendValidator(t, "custom events"), app.testHarvest.CustomEvents, want)
}
func (app *app) ExpectErrors(t internal.Validator, want []internal.WantError) {
t = internal.ExtendValidator(t, "traced errors")
internal.ExpectErrors(t, app.testHarvest.ErrorTraces, want)
}
func (app *app) ExpectErrorEvents(t internal.Validator, want []internal.WantErrorEvent) {
t = internal.ExtendValidator(t, "error events")
internal.ExpectErrorEvents(t, app.testHarvest.ErrorEvents, want)
}
func (app *app) ExpectTxnEvents(t internal.Validator, want []internal.WantTxnEvent) {
t = internal.ExtendValidator(t, "txn events")
internal.ExpectTxnEvents(t, app.testHarvest.TxnEvents, want)
}
func (app *app) ExpectMetrics(t internal.Validator, want []internal.WantMetric) {
t = internal.ExtendValidator(t, "metrics")
internal.ExpectMetrics(t, app.testHarvest.Metrics, want)
}
func (app *app) ExpectTxnTraces(t internal.Validator, want []internal.WantTxnTrace) {
t = internal.ExtendValidator(t, "txn traces")
internal.ExpectTxnTraces(t, app.testHarvest.TxnTraces, want)
}
func (app *app) ExpectSlowQueries(t internal.Validator, want []internal.WantSlowQuery) {
t = internal.ExtendValidator(t, "slow queries")
internal.ExpectSlowQueries(t, app.testHarvest.SlowSQLs, want)
}
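
The lifecycle above (connect routine, harvest ticker, shutdown handshake) is driven entirely through the package's public constructors, which are defined elsewhere in the package and not shown in this diff. A minimal usage sketch, assuming a placeholder license key:

package main

import (
	"time"

	newrelic "github.com/newrelic/go-agent"
)

func main() {
	cfg := newrelic.NewConfig("My Application", "__YOUR_LICENSE_KEY__")
	app, err := newrelic.NewApplication(cfg) // ultimately calls newApp above
	if err != nil {
		panic(err)
	}
	// Bounded wait for the connect handshake so early data is not dropped.
	_ = app.WaitForConnection(10 * time.Second)
	defer app.Shutdown(10 * time.Second) // triggers the final harvest in process()

	txn := app.StartTransaction("background-job", nil, nil)
	// ... work ...
	txn.End()
}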

vendor/github.com/newrelic/go-agent/internal_config.go generated vendored Normal file
@ -0,0 +1,153 @@
package newrelic
import (
"encoding/json"
"fmt"
"net/http"
"os"
"strings"
"github.com/newrelic/go-agent/internal"
"github.com/newrelic/go-agent/internal/logger"
"github.com/newrelic/go-agent/internal/utilization"
)
func copyDestConfig(c AttributeDestinationConfig) AttributeDestinationConfig {
cp := c
if nil != c.Include {
cp.Include = make([]string, len(c.Include))
copy(cp.Include, c.Include)
}
if nil != c.Exclude {
cp.Exclude = make([]string, len(c.Exclude))
copy(cp.Exclude, c.Exclude)
}
return cp
}
func copyConfigReferenceFields(cfg Config) Config {
cp := cfg
if nil != cfg.Labels {
cp.Labels = make(map[string]string, len(cfg.Labels))
for key, val := range cfg.Labels {
cp.Labels[key] = val
}
}
if nil != cfg.ErrorCollector.IgnoreStatusCodes {
ignored := make([]int, len(cfg.ErrorCollector.IgnoreStatusCodes))
copy(ignored, cfg.ErrorCollector.IgnoreStatusCodes)
cp.ErrorCollector.IgnoreStatusCodes = ignored
}
cp.Attributes = copyDestConfig(cfg.Attributes)
cp.ErrorCollector.Attributes = copyDestConfig(cfg.ErrorCollector.Attributes)
cp.TransactionEvents.Attributes = copyDestConfig(cfg.TransactionEvents.Attributes)
cp.TransactionTracer.Attributes = copyDestConfig(cfg.TransactionTracer.Attributes)
return cp
}
const (
agentLanguage = "go"
)
func transportSetting(t http.RoundTripper) interface{} {
if nil == t {
return nil
}
return fmt.Sprintf("%T", t)
}
func loggerSetting(lg Logger) interface{} {
if nil == lg {
return nil
}
if _, ok := lg.(logger.ShimLogger); ok {
return nil
}
return fmt.Sprintf("%T", lg)
}
const (
// https://source.datanerd.us/agents/agent-specs/blob/master/Custom-Host-Names.md
hostByteLimit = 255
)
type settings Config
func (s settings) MarshalJSON() ([]byte, error) {
c := Config(s)
transport := c.Transport
c.Transport = nil
logger := c.Logger
c.Logger = nil
js, err := json.Marshal(c)
if nil != err {
return nil, err
}
fields := make(map[string]interface{})
err = json.Unmarshal(js, &fields)
if nil != err {
return nil, err
}
// The License field is deleted here rather than tagged `json:"-"`,
// because consumers should still be able to populate Config from JSON.
delete(fields, `License`)
fields[`Transport`] = transportSetting(transport)
fields[`Logger`] = loggerSetting(logger)
return json.Marshal(fields)
}
func configConnectJSONInternal(c Config, pid int, util *utilization.Data, e internal.Environment, version string) ([]byte, error) {
return json.Marshal([]interface{}{struct {
Pid int `json:"pid"`
Language string `json:"language"`
Version string `json:"agent_version"`
Host string `json:"host"`
HostDisplayName string `json:"display_host,omitempty"`
Settings interface{} `json:"settings"`
AppName []string `json:"app_name"`
HighSecurity bool `json:"high_security"`
Labels internal.Labels `json:"labels,omitempty"`
Environment internal.Environment `json:"environment"`
Identifier string `json:"identifier"`
Util *utilization.Data `json:"utilization"`
}{
Pid: pid,
Language: agentLanguage,
Version: version,
Host: internal.StringLengthByteLimit(util.Hostname, hostByteLimit),
HostDisplayName: internal.StringLengthByteLimit(c.HostDisplayName, hostByteLimit),
Settings: (settings)(c),
AppName: strings.Split(c.AppName, ";"),
HighSecurity: c.HighSecurity,
Labels: internal.Labels(c.Labels),
Environment: e,
// This identifier field is provided to avoid:
// https://newrelic.atlassian.net/browse/DSCORE-778
//
// This identifier is used by the collector to look up the real
// agent. If an identifier isn't provided, the collector will
// create its own based on the first appname, which prevents a
// single daemon from connecting "a;b" and "a;c" at the same
// time.
//
// Providing the identifier below works around this issue and
// allows users more flexibility in using application rollups.
Identifier: c.AppName,
Util: util,
}})
}
func configConnectJSON(c Config) ([]byte, error) {
env := internal.NewEnvironment()
util := utilization.Gather(utilization.Config{
DetectAWS: c.Utilization.DetectAWS,
DetectDocker: c.Utilization.DetectDocker,
LogicalProcessors: c.Utilization.LogicalProcessors,
TotalRAMMIB: c.Utilization.TotalRAMMIB,
BillingHostname: c.Utilization.BillingHostname,
}, c.Logger)
return configConnectJSONInternal(c, os.Getpid(), util, env, Version)
}
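
A side effect worth seeing concretely: the connect payload splits AppName on semicolons but sends the full string as the Identifier. A hedged sketch of the resulting fields (licenseKey is a placeholder variable):

cfg := newrelic.NewConfig("one;two", licenseKey)
// The connect JSON built above then contains, abbreviated:
//
//	"app_name":   ["one", "two"]  // strings.Split(c.AppName, ";")
//	"identifier": "one;two"       // the full string, so "one;two" and
//	                              // "one;three" can connect from the
//	                              // same daemon concurrently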

@ -0,0 +1,121 @@
package newrelic
import (
"bufio"
"io"
"net"
"net/http"
)
const (
hasC = 1 << iota // CloseNotifier
hasF // Flusher
hasH // Hijacker
hasR // ReaderFrom
)
type wrap struct{ *txn }
type wrapR struct{ *txn }
type wrapH struct{ *txn }
type wrapHR struct{ *txn }
type wrapF struct{ *txn }
type wrapFR struct{ *txn }
type wrapFH struct{ *txn }
type wrapFHR struct{ *txn }
type wrapC struct{ *txn }
type wrapCR struct{ *txn }
type wrapCH struct{ *txn }
type wrapCHR struct{ *txn }
type wrapCF struct{ *txn }
type wrapCFR struct{ *txn }
type wrapCFH struct{ *txn }
type wrapCFHR struct{ *txn }
func (x wrapC) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCF) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCFR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCFH) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapCFHR) CloseNotify() <-chan bool { return x.W.(http.CloseNotifier).CloseNotify() }
func (x wrapF) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapFR) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapFH) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapFHR) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapCF) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapCFR) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapCFH) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapCFHR) Flush() { x.W.(http.Flusher).Flush() }
func (x wrapH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapCH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapCHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapCFH) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapCFHR) Hijack() (net.Conn, *bufio.ReadWriter, error) { return x.W.(http.Hijacker).Hijack() }
func (x wrapR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapCR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapCHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapCFR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func (x wrapCFHR) ReadFrom(r io.Reader) (int64, error) { return x.W.(io.ReaderFrom).ReadFrom(r) }
func upgradeTxn(txn *txn) Transaction {
x := 0
if _, ok := txn.W.(http.CloseNotifier); ok {
x |= hasC
}
if _, ok := txn.W.(http.Flusher); ok {
x |= hasF
}
if _, ok := txn.W.(http.Hijacker); ok {
x |= hasH
}
if _, ok := txn.W.(io.ReaderFrom); ok {
x |= hasR
}
switch x {
default:
// Wrap the transaction even when there are no methods needed to
// ensure consistent error stack trace depth.
return wrap{txn}
case hasR:
return wrapR{txn}
case hasH:
return wrapH{txn}
case hasH | hasR:
return wrapHR{txn}
case hasF:
return wrapF{txn}
case hasF | hasR:
return wrapFR{txn}
case hasF | hasH:
return wrapFH{txn}
case hasF | hasH | hasR:
return wrapFHR{txn}
case hasC:
return wrapC{txn}
case hasC | hasR:
return wrapCR{txn}
case hasC | hasH:
return wrapCH{txn}
case hasC | hasH | hasR:
return wrapCHR{txn}
case hasC | hasF:
return wrapCF{txn}
case hasC | hasF | hasR:
return wrapCFR{txn}
case hasC | hasF | hasH:
return wrapCFH{txn}
case hasC | hasF | hasH | hasR:
return wrapCFHR{txn}
}
}
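
The sixteen wrap types above exist so the returned Transaction's method set mirrors exactly the optional interfaces (CloseNotifier, Flusher, Hijacker, ReaderFrom) of the underlying writer, which keeps callers' type assertions working. A minimal sketch; app is assumed initialized:

package main

import (
	"net/http"

	newrelic "github.com/newrelic/go-agent"
)

var app newrelic.Application // assumed initialized via newrelic.NewApplication

func index(w http.ResponseWriter, r *http.Request) {
	txn := app.StartTransaction("index", w, r)
	defer txn.End()

	// The assertion succeeds only if the original w implemented
	// http.Flusher; the call is forwarded to the wrapped writer.
	if f, ok := txn.(http.Flusher); ok {
		f.Flush()
	}
}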

vendor/github.com/newrelic/go-agent/internal_txn.go generated vendored Normal file
@ -0,0 +1,492 @@
package newrelic
import (
"errors"
"net/http"
"net/url"
"sync"
"time"
"github.com/newrelic/go-agent/internal"
)
type txnInput struct {
W http.ResponseWriter
Request *http.Request
Config Config
Reply *internal.ConnectReply
Consumer dataConsumer
attrConfig *internal.AttributeConfig
}
type txn struct {
txnInput
// This mutex is required since the consumer may call the public API
// interface functions from different routines.
sync.Mutex
// finished indicates whether or not End() has been called. After
// finished has been set to true, no recording should occur.
finished bool
queuing time.Duration
start time.Time
name string // Work in progress name
isWeb bool
ignore bool
errors internal.TxnErrors // Lazily initialized.
attrs *internal.Attributes
// Fields relating to tracing and breakdown metrics/segments.
tracer internal.Tracer
// wroteHeader prevents capturing multiple response code errors if the
// user erroneously calls WriteHeader multiple times.
wroteHeader bool
// Fields assigned at completion
stop time.Time
duration time.Duration
finalName string // Full finalized metric name
zone internal.ApdexZone
apdexThreshold time.Duration
}
func newTxn(input txnInput, name string) *txn {
txn := &txn{
txnInput: input,
start: time.Now(),
name: name,
isWeb: nil != input.Request,
attrs: internal.NewAttributes(input.attrConfig),
}
if nil != txn.Request {
txn.queuing = internal.QueueDuration(input.Request.Header, txn.start)
internal.RequestAgentAttributes(txn.attrs, input.Request)
}
txn.attrs.Agent.HostDisplayName = txn.Config.HostDisplayName
txn.tracer.Enabled = txn.txnTracesEnabled()
txn.tracer.SegmentThreshold = txn.Config.TransactionTracer.SegmentThreshold
txn.tracer.StackTraceThreshold = txn.Config.TransactionTracer.StackTraceThreshold
txn.tracer.SlowQueriesEnabled = txn.slowQueriesEnabled()
txn.tracer.SlowQueryThreshold = txn.Config.DatastoreTracer.SlowQuery.Threshold
return txn
}
func (txn *txn) slowQueriesEnabled() bool {
return txn.Config.DatastoreTracer.SlowQuery.Enabled &&
txn.Reply.CollectTraces
}
func (txn *txn) txnTracesEnabled() bool {
return txn.Config.TransactionTracer.Enabled &&
txn.Reply.CollectTraces
}
func (txn *txn) txnEventsEnabled() bool {
return txn.Config.TransactionEvents.Enabled &&
txn.Reply.CollectAnalyticsEvents
}
func (txn *txn) errorEventsEnabled() bool {
return txn.Config.ErrorCollector.CaptureEvents &&
txn.Reply.CollectErrorEvents
}
func (txn *txn) freezeName() {
if txn.ignore || ("" != txn.finalName) {
return
}
txn.finalName = internal.CreateFullTxnName(txn.name, txn.Reply, txn.isWeb)
if "" == txn.finalName {
txn.ignore = true
}
}
func (txn *txn) getsApdex() bool {
return txn.isWeb
}
func (txn *txn) txnTraceThreshold() time.Duration {
if txn.Config.TransactionTracer.Threshold.IsApdexFailing {
return internal.ApdexFailingThreshold(txn.apdexThreshold)
}
return txn.Config.TransactionTracer.Threshold.Duration
}
func (txn *txn) shouldSaveTrace() bool {
return txn.txnTracesEnabled() &&
(txn.duration >= txn.txnTraceThreshold())
}
func (txn *txn) hasErrors() bool {
return len(txn.errors) > 0
}
func (txn *txn) MergeIntoHarvest(h *internal.Harvest) {
exclusive := time.Duration(0)
children := internal.TracerRootChildren(&txn.tracer)
if txn.duration > children {
exclusive = txn.duration - children
}
internal.CreateTxnMetrics(internal.CreateTxnMetricsArgs{
IsWeb: txn.isWeb,
Duration: txn.duration,
Exclusive: exclusive,
Name: txn.finalName,
Zone: txn.zone,
ApdexThreshold: txn.apdexThreshold,
HasErrors: txn.hasErrors(),
Queueing: txn.queuing,
}, h.Metrics)
internal.MergeBreakdownMetrics(&txn.tracer, h.Metrics, txn.finalName, txn.isWeb)
if txn.txnEventsEnabled() {
h.TxnEvents.AddTxnEvent(&internal.TxnEvent{
Name: txn.finalName,
Timestamp: txn.start,
Duration: txn.duration,
Queuing: txn.queuing,
Zone: txn.zone,
Attrs: txn.attrs,
DatastoreExternalTotals: txn.tracer.DatastoreExternalTotals,
})
}
requestURI := ""
if nil != txn.Request && nil != txn.Request.URL {
requestURI = internal.SafeURL(txn.Request.URL)
}
internal.MergeTxnErrors(h.ErrorTraces, txn.errors, txn.finalName, requestURI, txn.attrs)
if txn.errorEventsEnabled() {
for _, e := range txn.errors {
h.ErrorEvents.Add(&internal.ErrorEvent{
Klass: e.Klass,
Msg: e.Msg,
When: e.When,
TxnName: txn.finalName,
Duration: txn.duration,
Queuing: txn.queuing,
Attrs: txn.attrs,
DatastoreExternalTotals: txn.tracer.DatastoreExternalTotals,
})
}
}
if txn.shouldSaveTrace() {
h.TxnTraces.Witness(internal.HarvestTrace{
Start: txn.start,
Duration: txn.duration,
MetricName: txn.finalName,
CleanURL: requestURI,
Trace: txn.tracer.TxnTrace,
ForcePersist: false,
GUID: "",
SyntheticsResourceID: "",
Attrs: txn.attrs,
})
}
if nil != txn.tracer.SlowQueries {
h.SlowSQLs.Merge(txn.tracer.SlowQueries, txn.finalName, requestURI)
}
}
func responseCodeIsError(cfg *Config, code int) bool {
if code < http.StatusBadRequest { // 400
return false
}
for _, ignoreCode := range cfg.ErrorCollector.IgnoreStatusCodes {
if code == ignoreCode {
return false
}
}
return true
}
func headersJustWritten(txn *txn, code int) {
if txn.finished {
return
}
if txn.wroteHeader {
return
}
txn.wroteHeader = true
internal.ResponseHeaderAttributes(txn.attrs, txn.W.Header())
internal.ResponseCodeAttribute(txn.attrs, code)
if responseCodeIsError(&txn.Config, code) {
e := internal.TxnErrorFromResponseCode(time.Now(), code)
e.Stack = internal.GetStackTrace(1)
txn.noticeErrorInternal(e)
}
}
func (txn *txn) Header() http.Header { return txn.W.Header() }
func (txn *txn) Write(b []byte) (int, error) {
n, err := txn.W.Write(b)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, http.StatusOK)
return n, err
}
func (txn *txn) WriteHeader(code int) {
txn.W.WriteHeader(code)
txn.Lock()
defer txn.Unlock()
headersJustWritten(txn, code)
}
func (txn *txn) End() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return errAlreadyEnded
}
txn.finished = true
r := recover()
if nil != r {
e := internal.TxnErrorFromPanic(time.Now(), r)
e.Stack = internal.GetStackTrace(0)
txn.noticeErrorInternal(e)
}
txn.stop = time.Now()
txn.duration = txn.stop.Sub(txn.start)
txn.freezeName()
// Assign apdexThreshold regardless of whether or not the transaction
// gets apdex since it may be used to calculate the trace threshold.
txn.apdexThreshold = internal.CalculateApdexThreshold(txn.Reply, txn.finalName)
if txn.getsApdex() {
if txn.hasErrors() {
txn.zone = internal.ApdexFailing
} else {
txn.zone = internal.CalculateApdexZone(txn.apdexThreshold, txn.duration)
}
} else {
txn.zone = internal.ApdexNone
}
if txn.Config.Logger.DebugEnabled() {
txn.Config.Logger.Debug("transaction ended", map[string]interface{}{
"name": txn.finalName,
"duration_ms": txn.duration.Seconds() * 1000.0,
"ignored": txn.ignore,
"run": txn.Reply.RunID,
})
}
if !txn.ignore {
txn.Consumer.Consume(txn.Reply.RunID, txn)
}
// Note that if a consumer uses `panic(nil)`, the panic will not
// propagate.
if nil != r {
panic(r)
}
return nil
}
func (txn *txn) AddAttribute(name string, value interface{}) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return errAlreadyEnded
}
return internal.AddUserAttribute(txn.attrs, name, value, internal.DestAll)
}
var (
errorsLocallyDisabled = errors.New("errors locally disabled")
errorsRemotelyDisabled = errors.New("errors remotely disabled")
errNilError = errors.New("nil error")
errAlreadyEnded = errors.New("transaction has already ended")
)
const (
highSecurityErrorMsg = "message removed by high security setting"
)
func (txn *txn) noticeErrorInternal(err internal.TxnError) error {
if !txn.Config.ErrorCollector.Enabled {
return errorsLocallyDisabled
}
if !txn.Reply.CollectErrors {
return errorsRemotelyDisabled
}
if nil == txn.errors {
txn.errors = internal.NewTxnErrors(internal.MaxTxnErrors)
}
if txn.Config.HighSecurity {
err.Msg = highSecurityErrorMsg
}
txn.errors.Add(err)
return nil
}
func (txn *txn) NoticeError(err error) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return errAlreadyEnded
}
if nil == err {
return errNilError
}
e := internal.TxnErrorFromError(time.Now(), err)
e.Stack = internal.GetStackTrace(2)
return txn.noticeErrorInternal(e)
}
func (txn *txn) SetName(name string) error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return errAlreadyEnded
}
txn.name = name
return nil
}
func (txn *txn) Ignore() error {
txn.Lock()
defer txn.Unlock()
if txn.finished {
return errAlreadyEnded
}
txn.ignore = true
return nil
}
func (txn *txn) StartSegmentNow() SegmentStartTime {
var s internal.SegmentStartTime
txn.Lock()
if !txn.finished {
s = internal.StartSegment(&txn.tracer, time.Now())
}
txn.Unlock()
return SegmentStartTime{
segment: segment{
start: s,
txn: txn,
},
}
}
type segment struct {
start internal.SegmentStartTime
txn *txn
}
func endSegment(s Segment) {
txn := s.StartTime.txn
if nil == txn {
return
}
txn.Lock()
if !txn.finished {
internal.EndBasicSegment(&txn.tracer, s.StartTime.start, time.Now(), s.Name)
}
txn.Unlock()
}
func endDatastore(s DatastoreSegment) {
txn := s.StartTime.txn
if nil == txn {
return
}
txn.Lock()
defer txn.Unlock()
if txn.finished {
return
}
if txn.Config.HighSecurity {
s.QueryParameters = nil
}
if !txn.Config.DatastoreTracer.QueryParameters.Enabled {
s.QueryParameters = nil
}
if !txn.Config.DatastoreTracer.DatabaseNameReporting.Enabled {
s.DatabaseName = ""
}
if !txn.Config.DatastoreTracer.InstanceReporting.Enabled {
s.Host = ""
s.PortPathOrID = ""
}
internal.EndDatastoreSegment(internal.EndDatastoreParams{
Tracer: &txn.tracer,
Start: s.StartTime.start,
Now: time.Now(),
Product: string(s.Product),
Collection: s.Collection,
Operation: s.Operation,
ParameterizedQuery: s.ParameterizedQuery,
QueryParameters: s.QueryParameters,
Host: s.Host,
PortPathOrID: s.PortPathOrID,
Database: s.DatabaseName,
})
}
func externalSegmentURL(s ExternalSegment) *url.URL {
if "" != s.URL {
u, _ := url.Parse(s.URL)
return u
}
r := s.Request
if nil != s.Response && nil != s.Response.Request {
r = s.Response.Request
}
if r != nil {
return r.URL
}
return nil
}
func endExternal(s ExternalSegment) {
txn := s.StartTime.txn
if nil == txn {
return
}
txn.Lock()
defer txn.Unlock()
if txn.finished {
return
}
internal.EndExternalSegment(&txn.tracer, s.StartTime.start, time.Now(), externalSegmentURL(s))
}
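
Because Write and WriteHeader route through headersJustWritten and responseCodeIsError, writing the response via the transaction is enough to record 4xx/5xx responses as traced errors, except for codes listed in ErrorCollector.IgnoreStatusCodes. A sketch, again with app assumed initialized:

package main

import (
	"net/http"

	newrelic "github.com/newrelic/go-agent"
)

var app newrelic.Application // assumed initialized

func handler(w http.ResponseWriter, r *http.Request) {
	txn := app.StartTransaction("handler", w, r)
	defer txn.End()

	// Recorded as a traced error unless 502 appears in
	// Config.ErrorCollector.IgnoreStatusCodes.
	txn.WriteHeader(http.StatusBadGateway)
}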

vendor/github.com/newrelic/go-agent/log.go generated vendored Normal file
@ -0,0 +1,30 @@
package newrelic
import (
"io"
"github.com/newrelic/go-agent/internal/logger"
)
// Logger is the interface that is used for logging in the go-agent. Assign the
// Config.Logger field to the Logger you wish to use. Loggers must be safe for
// use in multiple goroutines.
//
// For an example implementation, see: _integrations/nrlogrus/nrlogrus.go
type Logger interface {
Error(msg string, context map[string]interface{})
Warn(msg string, context map[string]interface{})
Info(msg string, context map[string]interface{})
Debug(msg string, context map[string]interface{})
DebugEnabled() bool
}
// NewLogger creates a basic Logger at info level.
func NewLogger(w io.Writer) Logger {
return logger.New(w, false)
}
// NewDebugLogger creates a basic Logger at debug level.
func NewDebugLogger(w io.Writer) Logger {
return logger.New(w, true)
}
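
Any type with these five methods can be assigned to Config.Logger; it must be safe for concurrent use. A minimal sketch backed by the standard library's log package (which is goroutine-safe); the adapter type and its names are illustrative:

package main

import (
	"log"
	"os"
)

// stdLogger adapts a *log.Logger to the Logger interface above.
type stdLogger struct{ l *log.Logger }

func (s stdLogger) print(level, msg string, ctx map[string]interface{}) {
	s.l.Printf("%s %s %v", level, msg, ctx)
}

func (s stdLogger) Error(msg string, ctx map[string]interface{}) { s.print("ERROR", msg, ctx) }
func (s stdLogger) Warn(msg string, ctx map[string]interface{})  { s.print("WARN", msg, ctx) }
func (s stdLogger) Info(msg string, ctx map[string]interface{})  { s.print("INFO", msg, ctx) }
func (s stdLogger) Debug(msg string, ctx map[string]interface{}) { s.print("DEBUG", msg, ctx) }
func (s stdLogger) DebugEnabled() bool                           { return false }

func main() {
	_ = stdLogger{l: log.New(os.Stderr, "", log.LstdFlags)}
	// Assign this value to Config.Logger when building the agent Config.
}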

vendor/github.com/newrelic/go-agent/segments.go generated vendored Normal file
@ -0,0 +1,113 @@
package newrelic
import "net/http"
// SegmentStartTime is created by Transaction.StartSegmentNow and marks the
// beginning of a segment. A segment with a zero-valued SegmentStartTime may
// safely be ended.
type SegmentStartTime struct{ segment }
// Segment is used to instrument functions, methods, and blocks of code. The
// easiest way to use Segment is the StartSegment function.
type Segment struct {
StartTime SegmentStartTime
Name string
}
// DatastoreSegment is used to instrument calls to databases and object stores.
// Here is an example:
//
// defer newrelic.DatastoreSegment{
// StartTime: newrelic.StartSegmentNow(txn),
// Product: newrelic.DatastoreMySQL,
// Collection: "my_table",
// Operation: "SELECT",
// }.End()
//
type DatastoreSegment struct {
StartTime SegmentStartTime
// Product is the datastore type. See the constants in datastore.go.
Product DatastoreProduct
// Collection is the table or group.
Collection string
// Operation is the relevant action, e.g. "SELECT" or "GET".
Operation string
// ParameterizedQuery may be set to the query being performed. It must
// not contain any raw parameters, only placeholders.
ParameterizedQuery string
// QueryParameters may be used to provide query parameters. Care should
// be taken to only provide parameters which are not sensitive.
// QueryParameters are ignored in high security mode.
QueryParameters map[string]interface{}
// Host is the name of the server hosting the datastore.
Host string
// PortPathOrID can represent either the port, path, or id of the
// datastore being connected to.
PortPathOrID string
// DatabaseName is the name of the database where the current query is
// being executed.
DatabaseName string
}
// ExternalSegment is used to instrument external calls. StartExternalSegment
// is recommended when you have access to an http.Request.
type ExternalSegment struct {
StartTime SegmentStartTime
Request *http.Request
Response *http.Response
// If you do not have access to the request, this URL field should be
// used to indicate the endpoint.
URL string
}
// End finishes the segment.
func (s Segment) End() { endSegment(s) }
// End finishes the datastore segment.
func (s DatastoreSegment) End() { endDatastore(s) }
// End finishes the external segment.
func (s ExternalSegment) End() { endExternal(s) }
// StartSegmentNow helps avoid Transaction nil checks.
func StartSegmentNow(txn Transaction) SegmentStartTime {
if nil != txn {
return txn.StartSegmentNow()
}
return SegmentStartTime{}
}
// StartSegment makes it easy to instrument segments. To time a function, do
// the following:
//
// func timeMe(txn newrelic.Transaction) {
// defer newrelic.StartSegment(txn, "timeMe").End()
// // ... function code here ...
// }
//
// To time a block of code, do the following:
//
// segment := StartSegment(txn, "myBlock")
// // ... code you want to time here ...
// segment.End()
//
func StartSegment(txn Transaction, name string) Segment {
return Segment{
StartTime: StartSegmentNow(txn),
Name: name,
}
}
// StartExternalSegment makes it easier to instrument external calls.
//
// segment := newrelic.StartExternalSegment(txn, request)
// resp, err := client.Do(request)
// segment.Response = resp
// segment.End()
//
func StartExternalSegment(txn Transaction, request *http.Request) ExternalSegment {
return ExternalSegment{
StartTime: StartSegmentNow(txn),
Request: request,
}
}

vendor/github.com/newrelic/go-agent/transaction.go generated vendored Normal file
@ -0,0 +1,45 @@
package newrelic
import "net/http"
// Transaction represents a request or a background task.
// Each Transaction should only be used in a single goroutine.
type Transaction interface {
// If StartTransaction is called with a non-nil http.ResponseWriter then
// the Transaction may be used in its place. This allows
// instrumentation of the response code and response headers.
http.ResponseWriter
// End finishes the current transaction, stopping all further
// instrumentation. Subsequent calls to End will have no effect.
End() error
// Ignore ensures that this transaction's data will not be recorded.
Ignore() error
// SetName names the transaction. Transactions will not be grouped
// usefully if too many unique names are used.
SetName(name string) error
// NoticeError records an error. The first five errors per transaction
// are recorded (this behavior is subject to potential change in the
// future).
NoticeError(err error) error
// AddAttribute adds a key value pair to the current transaction. This
// information is attached to errors, transaction events, and error
// events. The key must contain fewer than 255 bytes. The value
// must be a number, string, or boolean. Attribute configuration is
// applied (see config.go).
//
// For more information, see:
// https://docs.newrelic.com/docs/agents/manage-apm-agents/agent-metrics/collect-custom-attributes
AddAttribute(key string, value interface{}) error
// StartSegmentNow allows the timing of functions, external calls, and
// datastore calls. The segments of each transaction MUST be used in a
// single goroutine. Consumers are encouraged to use the
// `StartSegmentNow` function, which checks if the Transaction is nil.
// See segments.go
StartSegmentNow() SegmentStartTime
}
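
Putting the interface together: a typical instrumented handler names the transaction, attaches attributes, and reports errors. A sketch; app is assumed initialized and doWork is a stand-in:

package main

import (
	"errors"
	"net/http"

	newrelic "github.com/newrelic/go-agent"
)

var app newrelic.Application // assumed initialized

func doWork(r *http.Request) error { return errors.New("boom") } // stand-in

func billing(w http.ResponseWriter, r *http.Request) {
	txn := app.StartTransaction("billing", w, r)
	defer txn.End()

	txn.AddAttribute("customerID", 1234)
	if err := doWork(r); err != nil {
		txn.NoticeError(err)
	}
}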

vendor/github.com/newrelic/go-agent/version.go generated vendored Normal file
@ -0,0 +1,10 @@
package newrelic
const (
major = "1"
minor = "5"
patch = "0"
// Version is the full string version of this Go Agent.
Version = major + "." + minor + "." + patch
)

vendor/github.com/paultyng/go-newrelic/LICENSE generated vendored Normal file
@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -0,0 +1,87 @@
package api
import (
"fmt"
"net/url"
)
func (c *Client) queryAlertChannels() ([]AlertChannel, error) {
channels := []AlertChannel{}
reqURL, err := url.Parse("/alerts_channels.json")
if err != nil {
return nil, err
}
nextPath := reqURL.String()
for nextPath != "" {
resp := struct {
Channels []AlertChannel `json:"channels,omitempty"`
}{}
nextPath, err = c.Do("GET", nextPath, nil, &resp)
if err != nil {
return nil, err
}
channels = append(channels, resp.Channels...)
}
return channels, nil
}
// GetAlertChannel returns a specific alert channel by ID
func (c *Client) GetAlertChannel(id int) (*AlertChannel, error) {
channels, err := c.queryAlertChannels()
if err != nil {
return nil, err
}
for _, channel := range channels {
if channel.ID == id {
return &channel, nil
}
}
return nil, ErrNotFound
}
// ListAlertChannels returns all alert channels for the account.
func (c *Client) ListAlertChannels() ([]AlertChannel, error) {
return c.queryAlertChannels()
}
// CreateAlertChannel creates a new alert notification channel for the account.
func (c *Client) CreateAlertChannel(channel AlertChannel) (*AlertChannel, error) {
// TODO: support attaching policy ID's here?
// qs := map[string]string{
// "policy_ids[]": channel.Links.PolicyIDs,
// }
if len(channel.Links.PolicyIDs) > 0 {
return nil, fmt.Errorf("You cannot create an alert channel with policy IDs, you must attach polidy IDs after creation.")
}
req := struct {
Channel AlertChannel `json:"channel"`
}{
Channel: channel,
}
resp := struct {
Channels []AlertChannel `json:"channels,omitempty"`
}{}
_, err := c.Do("POST", "/alerts_channels.json", req, &resp)
if err != nil {
return nil, err
}
return &resp.Channels[0], nil
}
// DeleteAlertChannel deletes the alert notification channel with the specified ID.
func (c *Client) DeleteAlertChannel(id int) error {
u := &url.URL{Path: fmt.Sprintf("/alerts_channels/%v.json", id)}
_, err := c.Do("DELETE", u.String(), nil, nil)
return err
}

@ -0,0 +1,117 @@
package api
import (
"fmt"
"net/url"
"strconv"
)
func (c *Client) queryAlertConditions(policyID int) ([]AlertCondition, error) {
conditions := []AlertCondition{}
reqURL, err := url.Parse("/alerts_conditions.json")
if err != nil {
return nil, err
}
qs := reqURL.Query()
qs.Set("policy_id", strconv.Itoa(policyID))
reqURL.RawQuery = qs.Encode()
nextPath := reqURL.String()
for nextPath != "" {
resp := struct {
Conditions []AlertCondition `json:"conditions,omitempty"`
}{}
nextPath, err = c.Do("GET", nextPath, nil, &resp)
if err != nil {
return nil, err
}
// Assign by index: ranging by value would set PolicyID on a copy of
// each element rather than on the slice itself.
for i := range resp.Conditions {
resp.Conditions[i].PolicyID = policyID
}
conditions = append(conditions, resp.Conditions...)
}
return conditions, nil
}
// GetAlertCondition returns a specific alert condition for the policy by ID.
func (c *Client) GetAlertCondition(policyID int, id int) (*AlertCondition, error) {
conditions, err := c.queryAlertConditions(policyID)
if err != nil {
return nil, err
}
for _, condition := range conditions {
if condition.ID == id {
return &condition, nil
}
}
return nil, ErrNotFound
}
// ListAlertConditions returns alert conditions for the specified policy.
func (c *Client) ListAlertConditions(policyID int) ([]AlertCondition, error) {
return c.queryAlertConditions(policyID)
}
// CreateAlertCondition creates a new alert condition for the policy specified in the condition.
func (c *Client) CreateAlertCondition(condition AlertCondition) (*AlertCondition, error) {
policyID := condition.PolicyID
req := struct {
Condition AlertCondition `json:"condition"`
}{
Condition: condition,
}
resp := struct {
Condition AlertCondition `json:"condition,omitempty"`
}{}
u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/policies/%v.json", policyID)}
_, err := c.Do("POST", u.String(), req, &resp)
if err != nil {
return nil, err
}
resp.Condition.PolicyID = policyID
return &resp.Condition, nil
}
// UpdateAlertCondition updates an existing alert condition.
func (c *Client) UpdateAlertCondition(condition AlertCondition) (*AlertCondition, error) {
policyID := condition.PolicyID
id := condition.ID
req := struct {
Condition AlertCondition `json:"condition"`
}{
Condition: condition,
}
resp := struct {
Condition AlertCondition `json:"condition,omitempty"`
}{}
u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/%v.json", id)}
_, err := c.Do("PUT", u.String(), req, &resp)
if err != nil {
return nil, err
}
resp.Condition.PolicyID = policyID
return &resp.Condition, nil
}
// DeleteAlertCondition deletes the alert condition with the specified ID.
func (c *Client) DeleteAlertCondition(policyID int, id int) error {
u := &url.URL{Path: fmt.Sprintf("/alerts_conditions/%v.json", id)}
_, err := c.Do("DELETE", u.String(), nil, nil)
return err
}

@ -0,0 +1,86 @@
package api
import (
"fmt"
"net/url"
)
func (c *Client) queryAlertPolicies(name *string) ([]AlertPolicy, error) {
policies := []AlertPolicy{}
reqURL, err := url.Parse("/alerts_policies.json")
if err != nil {
return nil, err
}
qs := reqURL.Query()
if name != nil {
qs.Set("filter[name]", *name)
}
reqURL.RawQuery = qs.Encode()
nextPath := reqURL.String()
for nextPath != "" {
resp := struct {
Policies []AlertPolicy `json:"policies,omitempty"`
}{}
nextPath, err = c.Do("GET", nextPath, nil, &resp)
if err != nil {
return nil, err
}
policies = append(policies, resp.Policies...)
}
return policies, nil
}
// GetAlertPolicy returns a specific alert policy by ID
func (c *Client) GetAlertPolicy(id int) (*AlertPolicy, error) {
policies, err := c.queryAlertPolicies(nil)
if err != nil {
return nil, err
}
for _, policy := range policies {
if policy.ID == id {
return &policy, nil
}
}
return nil, ErrNotFound
}
// ListAlertPolicies returns all alert policies for the account.
func (c *Client) ListAlertPolicies() ([]AlertPolicy, error) {
return c.queryAlertPolicies(nil)
}
// CreateAlertPolicy creates a new alert policy for the account.
func (c *Client) CreateAlertPolicy(policy AlertPolicy) (*AlertPolicy, error) {
req := struct {
Policy AlertPolicy `json:"policy"`
}{
Policy: policy,
}
resp := struct {
Policy AlertPolicy `json:"policy,omitempty"`
}{}
_, err := c.Do("POST", "/alerts_policies.json", req, &resp)
if err != nil {
return nil, err
}
return &resp.Policy, nil
}
// DeleteAlertPolicy deletes an existing alert policy from the account.
func (c *Client) DeleteAlertPolicy(id int) error {
u := &url.URL{Path: fmt.Sprintf("/alerts_policies/%v.json", id)}
_, err := c.Do("DELETE", u.String(), nil, nil)
return err
}

@ -0,0 +1,64 @@
package api
import (
"net/url"
"regexp"
"strconv"
)
// UpdateAlertPolicyChannels attaches the specified notification channels to an alert policy.
func (c *Client) UpdateAlertPolicyChannels(policyID int, channelIDs []int) error {
channelIDStrings := make([]string, len(channelIDs))
for i, channelID := range channelIDs {
channelIDStrings[i] = strconv.Itoa(channelID)
}
reqURL, err := url.Parse("/alerts_policy_channels.json")
if err != nil {
return err
}
qs := url.Values{
"policy_id": []string{strconv.Itoa(policyID)},
"channel_ids": channelIDStrings,
}
reqURL.RawQuery = qs.Encode()
nextPath := reqURL.String()
_, err = c.Do("PUT", nextPath, nil, nil)
return err
}
// DeleteAlertPolicyChannel detaches a notification channel from an alert policy.
func (c *Client) DeleteAlertPolicyChannel(policyID int, channelID int) error {
reqURL, err := url.Parse("/alerts_policy_channels.json")
if err != nil {
return err
}
qs := url.Values{
"policy_id": []string{strconv.Itoa(policyID)},
"channel_id": []string{strconv.Itoa(channelID)},
}
reqURL.RawQuery = qs.Encode()
nextPath := reqURL.String()
_, err = c.Do("DELETE", nextPath, nil, nil)
if err != nil {
if apiErr, ok := err.(*ErrorResponse); ok {
matched, err := regexp.MatchString("Alerts policy with ID: \\d+ is not valid.", apiErr.Detail.Title)
if err != nil {
return err
}
if matched {
return ErrNotFound
}
}
return err
}
return nil
}

@ -0,0 +1,58 @@
package api
import (
"net/url"
"strconv"
)
type applicationsFilters struct {
Name *string
Host *string
IDs []int
Language *string
}
func (c *Client) queryApplications(filters applicationsFilters) ([]Application, error) {
applications := []Application{}
reqURL, err := url.Parse("/applications.json")
if err != nil {
return nil, err
}
qs := reqURL.Query()
if filters.Name != nil {
qs.Set("filter[name]", *filters.Name)
}
if filters.Host != nil {
qs.Set("filter[host]", *filters.Host)
}
for _, id := range filters.IDs {
qs.Add("filter[ids]", strconv.Itoa(id))
}
if filters.Language != nil {
qs.Set("filter[language]", *filters.Language)
}
reqURL.RawQuery = qs.Encode()
nextPath := reqURL.String()
for nextPath != "" {
resp := struct {
Applications []Application `json:"applications,omitempty"`
}{}
nextPath, err = c.Do("GET", nextPath, nil, &resp)
if err != nil {
return nil, err
}
applications = append(applications, resp.Applications...)
}
return applications, nil
}
// ListApplications returns all applications for the account.
func (c *Client) ListApplications() ([]Application, error) {
return c.queryApplications(applicationsFilters{})
}

vendor/github.com/paultyng/go-newrelic/api/client.go generated vendored Normal file
@ -0,0 +1,108 @@
package api
import (
"fmt"
"github.com/tomnomnom/linkheader"
resty "gopkg.in/resty.v0"
)
// Client represents the client state for the API.
type Client struct {
RestyClient *resty.Client
}
// ErrorResponse represents an error payload returned by the New Relic API.
type ErrorResponse struct {
Detail *ErrorDetail `json:"error,omitempty"`
}
func (e *ErrorResponse) Error() string {
if e != nil && e.Detail != nil {
return e.Detail.Title
}
return "Unknown error"
}
// ErrorDetail represents the details of an ErrorResponse.
type ErrorDetail struct {
Title string `json:"title,omitempty"`
}
// Config contains all the configuration data for the API Client
type Config struct {
APIKey string
BaseURL string
Debug bool
}
// New returns a new Client for the specified apiKey.
func New(config Config) Client {
r := resty.New()
baseURL := config.BaseURL
if baseURL == "" {
baseURL = "https://api.newrelic.com/v2"
}
r.SetHeader("X-Api-Key", config.APIKey)
r.SetHostURL(baseURL)
if config.Debug {
r.SetDebug(true)
}
c := Client{
RestyClient: r,
}
return c
}
// Do executes an API request with the specified parameters and returns
// the path of the next page of results, if any.
func (c *Client) Do(method string, path string, body interface{}, response interface{}) (string, error) {
r := c.RestyClient.R().
SetError(&ErrorResponse{})
if body != nil {
r = r.SetBody(body)
}
if response != nil {
r = r.SetResult(response)
}
apiResponse, err := r.Execute(method, path)
if err != nil {
return "", err
}
nextPath := ""
header := apiResponse.Header().Get("Link")
if header != "" {
links := linkheader.Parse(header)
for _, link := range links.FilterByRel("next") {
nextPath = link.URL
break
}
}
statusClass := apiResponse.StatusCode() / 100 % 10
if statusClass == 2 {
return nextPath, nil
}
rawError := apiResponse.Error()
if rawError != nil {
apiError := rawError.(*ErrorResponse)
if apiError.Detail != nil {
return "", apiError
}
}
return "", fmt.Errorf("Unexpected status %v returned from API", apiResponse.StatusCode())
}
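
Do returns the rel="next" path parsed from the Link response header (or the empty string), which is what the query helpers in this package loop on. A sketch of direct use, with the API key as a placeholder:

package main

import (
	"log"

	"github.com/paultyng/go-newrelic/api"
)

func main() {
	c := api.New(api.Config{APIKey: "__YOUR_API_KEY__"})

	nextPath := "/applications.json"
	for nextPath != "" {
		resp := struct {
			Applications []api.Application `json:"applications,omitempty"`
		}{}
		var err error
		nextPath, err = c.Do("GET", nextPath, nil, &resp)
		if err != nil {
			log.Fatal(err)
		}
		// resp.Applications holds one page of results.
	}
}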

vendor/github.com/paultyng/go-newrelic/api/labels.go generated vendored Normal file
@ -0,0 +1,79 @@
package api
import (
"fmt"
"net/url"
)
func (c *Client) queryLabels() ([]Label, error) {
labels := []Label{}
reqURL, err := url.Parse("/labels.json")
if err != nil {
return nil, err
}
nextPath := reqURL.String()
for nextPath != "" {
resp := struct {
Labels []Label `json:"labels,omitempty"`
}{}
nextPath, err = c.Do("GET", nextPath, nil, &resp)
if err != nil {
return nil, err
}
labels = append(labels, resp.Labels...)
}
return labels, nil
}
// GetLabel returns a specific label by key.
func (c *Client) GetLabel(key string) (*Label, error) {
labels, err := c.queryLabels()
if err != nil {
return nil, err
}
for _, label := range labels {
if label.Key == key {
return &label, nil
}
}
return nil, ErrNotFound
}
// ListLabels returns the labels for the account.
func (c *Client) ListLabels() ([]Label, error) {
return c.queryLabels()
}
// CreateLabel creates a new label for the account.
func (c *Client) CreateLabel(label Label) error {
if label.Links.Applications == nil {
label.Links.Applications = make([]int, 0)
}
if label.Links.Servers == nil {
label.Links.Servers = make([]int, 0)
}
req := struct {
Label Label `json:"label,omitempty"`
}{
Label: label,
}
_, err := c.Do("PUT", "/labels.json", req, nil)
return err
}
// DeleteLabel deletes a label on the account specified by key.
func (c *Client) DeleteLabel(key string) error {
u := &url.URL{Path: fmt.Sprintf("/labels/%v.json", key)}
_, err := c.Do("DELETE", u.String(), nil, nil)
return err
}

vendor/github.com/paultyng/go-newrelic/api/types.go generated vendored Normal file
@ -0,0 +1,120 @@
package api
import "errors"
var (
// ErrNotFound is returned when the requested resource cannot be found.
ErrNotFound = errors.New("newrelic: Resource not found")
)
// LabelLinks represents external references on the Label.
type LabelLinks struct {
Applications []int `json:"applications"`
Servers []int `json:"servers"`
}
// Label represents a New Relic label.
type Label struct {
Key string `json:"key,omitempty"`
Category string `json:"category,omitempty"`
Name string `json:"name,omitempty"`
Links LabelLinks `json:"links,omitempty"`
}
// AlertPolicy represents a New Relic alert policy.
type AlertPolicy struct {
ID int `json:"id,omitempty"`
IncidentPreference string `json:"incident_preference,omitempty"`
Name string `json:"name,omitempty"`
CreatedAt int `json:"created_at,omitempty"`
UpdatedAt int `json:"updated_at,omitempty"`
}
// AlertConditionUserDefined represents user defined metrics for the New Relic alert condition.
type AlertConditionUserDefined struct {
Metric string `json:"metric,omitempty"`
ValueFunction string `json:"value_function,omitempty"`
}
// AlertConditionTerm represents the terms of a New Relic alert condition.
type AlertConditionTerm struct {
Duration int `json:"duration,string,omitempty"`
Operator string `json:"operator,omitempty"`
Priority string `json:"priority,omitempty"`
Threshold float64 `json:"threshold,string,omitempty"`
TimeFunction string `json:"time_function,omitempty"`
}
// AlertCondition represents a New Relic alert condition.
// TODO: custom unmarshal entities to ints?
// TODO: handle unmarshaling .75 for float (not just 0.75)
type AlertCondition struct {
PolicyID int `json:"-"`
ID int `json:"id,omitempty"`
Type string `json:"type,omitempty"`
Name string `json:"name,omitempty"`
Enabled bool `json:"enabled,omitempty"`
Entities []string `json:"entities,omitempty"`
Metric string `json:"metric,omitempty"`
RunbookURL string `json:"runbook_url,omitempty"`
Terms []AlertConditionTerm `json:"terms,omitempty"`
UserDefined AlertConditionUserDefined `json:"user_defined,omitempty"`
}
// AlertChannelLinks represent the links between policies and alert channels
type AlertChannelLinks struct {
PolicyIDs []int `json:"policy_ids,omitempty"`
}
// AlertChannel represents a New Relic alert notification channel
type AlertChannel struct {
ID int `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Type string `json:"type,omitempty"`
Configuration map[string]interface{} `json:"configuration,omitempty"`
Links AlertChannelLinks `json:"links,omitempty"`
}
// ApplicationSummary represents performance information about a New Relic application.
type ApplicationSummary struct {
ResponseTime float64 `json:"response_time"`
Throughput float64 `json:"throughput"`
ErrorRate float64 `json:"error_rate"`
ApdexTarget float64 `json:"apdex_target"`
ApdexScore float64 `json:"apdex_score"`
HostCount int `json:"host_count"`
InstanceCount int `json:"instance_count"`
ConcurrentInstanceCount int `json:"concurrent_instance_count"`
}
// ApplicationEndUserSummary represents end user performance information about a New Relic application.
type ApplicationEndUserSummary struct {
ResponseTime float64 `json:"response_time"`
Throughput float64 `json:"throughput"`
ApdexTarget float64 `json:"apdex_target"`
ApdexScore float64 `json:"apdex_score"`
}
// ApplicationSettings represents settings for a New Relic application.
type ApplicationSettings struct {
AppApdexThreshold float64 `json:"app_apdex_threshold,omitempty"`
EndUserApdexThreshold float64 `json:"end_user_apdex_threshold,omitempty"`
EnableRealUserMonitoring bool `json:"enable_real_user_monitoring,omitempty"`
UseServerSideConfig bool `json:"use_server_side_config,omitempty"`
}
// ApplicationLinks represents external references on the Application.
type ApplicationLinks struct {
ServerIDs []int `json:"servers,omitempty"`
HostIDs []int `json:"application_hosts,omitempty"`
InstanceIDs []int `json:"application_instances,omitempty"`
AlertPolicyID int `json:"alert_policy"`
}
// Application represents a New Relic application.
type Application struct {
ID int `json:"id,omitempty"`
Name string `json:"name,omitempty"`
Language string `json:"language,omitempty"`
HealthStatus string `json:"health_status,omitempty"`
Reporting bool `json:"reporting,omitempty"`
LastReportedAt string `json:"last_reported_at,omitempty"`
Summary ApplicationSummary `json:"application_summary,omitempty"`
EndUserSummary ApplicationEndUserSummary `json:"end_user_summary,omitempty"`
Settings ApplicationSettings `json:"settings,omitempty"`
Links ApplicationLinks `json:"links,omitempty"`
}

@ -0,0 +1,10 @@
# Contributing
* Raise an issue if appropriate
* Fork the repo
* Bootstrap the dev dependencies (run `./script/bootstrap`)
* Make your changes
* Use [gofmt](https://golang.org/cmd/gofmt/)
* Make sure the tests pass (run `./script/test`)
* Make sure the linters pass (run `./script/lint`)
* Issue a pull request

21
vendor/github.com/tomnomnom/linkheader/LICENSE generated vendored Normal file
View File

@ -0,0 +1,21 @@
MIT License
Copyright (c) 2016 Tom Hudson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

35
vendor/github.com/tomnomnom/linkheader/README.mkd generated vendored Normal file
View File

@ -0,0 +1,35 @@
# Golang Link Header Parser
Library for parsing HTTP Link headers. Requires Go 1.2 or higher.
Docs can be found on [the GoDoc page](https://godoc.org/github.com/tomnomnom/linkheader).
[![Build Status](https://travis-ci.org/tomnomnom/linkheader.svg)](https://travis-ci.org/tomnomnom/linkheader)
## Basic Example
```go
package main
import (
"fmt"
"github.com/tomnomnom/linkheader"
)
func main() {
header := "<https://api.github.com/user/58276/repos?page=2>; rel=\"next\"," +
"<https://api.github.com/user/58276/repos?page=2>; rel=\"last\""
links := linkheader.Parse(header)
for _, link := range links {
fmt.Printf("URL: %s; Rel: %s\n", link.URL, link.Rel)
}
}
// Output:
// URL: https://api.github.com/user/58276/repos?page=2; Rel: next
// URL: https://api.github.com/user/58276/repos?page=2; Rel: last
```
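
The `Links` slice returned by `Parse` also supports filtering by `rel`; a short sketch building on the example above:

```go
package main

import (
	"fmt"

	"github.com/tomnomnom/linkheader"
)

func main() {
	header := "<https://api.github.com/user/58276/repos?page=2>; rel=\"next\"," +
		"<https://api.github.com/user/58276/repos?page=3>; rel=\"last\""

	// FilterByRel returns only the links whose rel matches.
	for _, link := range linkheader.Parse(header).FilterByRel("next") {
		fmt.Println(link.URL)
	}
	// Output:
	// https://api.github.com/user/58276/repos?page=2
}
```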

143
vendor/github.com/tomnomnom/linkheader/main.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
// Package linkheader provides functions for parsing HTTP Link headers
package linkheader
import (
"fmt"
"strings"
)
// A Link is a single URL and related parameters
type Link struct {
URL string
Rel string
Params map[string]string
}
// HasParam reports whether a Link has a particular parameter
func (l Link) HasParam(key string) bool {
for p := range l.Params {
if p == key {
return true
}
}
return false
}
// Param returns the value of a parameter if it exists, or the empty string otherwise
func (l Link) Param(key string) string {
for k, v := range l.Params {
if key == k {
return v
}
}
return ""
}
// String returns the string representation of a link
func (l Link) String() string {
p := make([]string, 0, len(l.Params))
for k, v := range l.Params {
p = append(p, fmt.Sprintf("%s=\"%s\"", k, v))
}
if l.Rel != "" {
p = append(p, fmt.Sprintf("%s=\"%s\"", "rel", l.Rel))
}
return fmt.Sprintf("<%s>; %s", l.URL, strings.Join(p, "; "))
}
// Links is a slice of Link structs
type Links []Link
// FilterByRel filters a group of Links by the provided Rel attribute
func (l Links) FilterByRel(r string) Links {
links := make(Links, 0)
for _, link := range l {
if link.Rel == r {
links = append(links, link)
}
}
return links
}
// String returns the string representation of multiple Links
// for use in HTTP responses etc
func (l Links) String() string {
var strs []string
for _, link := range l {
strs = append(strs, link.String())
}
return strings.Join(strs, ", ")
}
// Parse parses a raw Link header in the form:
// <url>; rel="foo", <url>; rel="bar"; wat="dis"
// returning a slice of Link structs
func Parse(raw string) Links {
links := make(Links, 0)
// One chunk: <url>; rel="foo"
for _, chunk := range strings.Split(raw, ",") {
link := Link{URL: "", Rel: "", Params: make(map[string]string)}
// Figure out what each piece of the chunk is
for _, piece := range strings.Split(chunk, ";") {
piece = strings.Trim(piece, " ")
if piece == "" {
continue
}
// URL
if piece[0] == '<' && piece[len(piece)-1] == '>' {
link.URL = strings.Trim(piece, "<>")
continue
}
// Params
key, val := parseParam(piece)
if key == "" {
continue
}
// Special case for rel
if strings.ToLower(key) == "rel" {
link.Rel = val
}
link.Params[key] = val
}
links = append(links, link)
}
return links
}
// ParseMultiple is like Parse, but accepts a slice of headers
// rather than just one header string
func ParseMultiple(headers []string) Links {
links := make(Links, 0)
for _, header := range headers {
links = append(links, Parse(header)...)
}
return links
}
// parseParam takes a raw param in the form key="val" and
// returns the key and value as separate strings
func parseParam(raw string) (key, val string) {
parts := strings.SplitN(raw, "=", 2)
if len(parts) != 2 {
return "", ""
}
key = parts[0]
val = strings.Trim(parts[1], "\"")
return key, val
}
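
Note one quirk visible in the Parse implementation above: a `rel` parameter is stored both on `link.Rel` and in `link.Params` (the special case assigns `Rel` and then falls through to the map), so `String` emits it twice. A minimal sketch exercising `HasParam`, `Param`, and the round trip:

```go
package main

import (
	"fmt"

	"github.com/tomnomnom/linkheader"
)

func main() {
	raw := `<https://example.com/?page=2>; rel="next"; title="Page Two"`

	link := linkheader.Parse(raw)[0]

	fmt.Println(link.Rel)               // next
	fmt.Println(link.HasParam("title")) // true
	fmt.Println(link.Param("title"))    // Page Two

	// rel appears in Params as well, so the serialized form repeats it
	// (param order varies because Params is a map).
	fmt.Println(link.String())
}
```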

68
vendor/golang.org/x/net/idna/idna.go generated vendored Normal file
View File

@ -0,0 +1,68 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package idna implements IDNA2008 (Internationalized Domain Names for
// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
// RFC 5894.
package idna // import "golang.org/x/net/idna"
import (
"strings"
"unicode/utf8"
)
// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11
// acePrefix is the ASCII Compatible Encoding prefix.
const acePrefix = "xn--"
// ToASCII converts a domain or domain label to its ASCII form. For example,
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
// ToASCII("golang") is "golang".
func ToASCII(s string) (string, error) {
if ascii(s) {
return s, nil
}
labels := strings.Split(s, ".")
for i, label := range labels {
if !ascii(label) {
a, err := encode(acePrefix, label)
if err != nil {
return "", err
}
labels[i] = a
}
}
return strings.Join(labels, "."), nil
}
// ToUnicode converts a domain or domain label to its Unicode form. For example,
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
// ToUnicode("golang") is "golang".
func ToUnicode(s string) (string, error) {
if !strings.Contains(s, acePrefix) {
return s, nil
}
labels := strings.Split(s, ".")
for i, label := range labels {
if strings.HasPrefix(label, acePrefix) {
u, err := decode(label[len(acePrefix):])
if err != nil {
return "", err
}
labels[i] = u
}
}
return strings.Join(labels, "."), nil
}
func ascii(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] >= utf8.RuneSelf {
return false
}
}
return true
}
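
A minimal usage sketch of the two exported conversions; the expected outputs come straight from the doc comments above:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/idna"
)

func main() {
	// Non-ASCII labels are Punycode-encoded and given the "xn--" ACE prefix.
	a, err := idna.ToASCII("bücher.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(a) // xn--bcher-kva.example.com

	// ToUnicode decodes any label that carries the ACE prefix.
	u, err := idna.ToUnicode("xn--bcher-kva.example.com")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(u) // bücher.example.com
}
```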

200
vendor/golang.org/x/net/idna/punycode.go generated vendored Normal file
View File

@ -0,0 +1,200 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package idna
// This file implements the Punycode algorithm from RFC 3492.
import (
"fmt"
"math"
"strings"
"unicode/utf8"
)
// These parameter values are specified in section 5.
//
// All computation is done with int32s, so that overflow behavior is identical
// regardless of whether int is 32-bit or 64-bit.
const (
base int32 = 36
damp int32 = 700
initialBias int32 = 72
initialN int32 = 128
skew int32 = 38
tmax int32 = 26
tmin int32 = 1
)
// decode decodes a string as specified in section 6.2.
func decode(encoded string) (string, error) {
if encoded == "" {
return "", nil
}
pos := 1 + strings.LastIndex(encoded, "-")
if pos == 1 {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
if pos == len(encoded) {
return encoded[:len(encoded)-1], nil
}
output := make([]rune, 0, len(encoded))
if pos != 0 {
for _, r := range encoded[:pos-1] {
output = append(output, r)
}
}
i, n, bias := int32(0), initialN, initialBias
for pos < len(encoded) {
oldI, w := i, int32(1)
for k := base; ; k += base {
if pos == len(encoded) {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
digit, ok := decodeDigit(encoded[pos])
if !ok {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
pos++
i += digit * w
if i < 0 {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
t := k - bias
if t < tmin {
t = tmin
} else if t > tmax {
t = tmax
}
if digit < t {
break
}
w *= base - t
if w >= math.MaxInt32/base {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
}
x := int32(len(output) + 1)
bias = adapt(i-oldI, x, oldI == 0)
n += i / x
i %= x
if n > utf8.MaxRune || len(output) >= 1024 {
return "", fmt.Errorf("idna: invalid label %q", encoded)
}
output = append(output, 0)
copy(output[i+1:], output[i:])
output[i] = n
i++
}
return string(output), nil
}
// encode encodes a string as specified in section 6.3 and prepends prefix to
// the result.
//
// The "while h < length(input)" line in the specification becomes "for
// remaining != 0" in the Go code, because len(s) in Go is in bytes, not runes.
func encode(prefix, s string) (string, error) {
output := make([]byte, len(prefix), len(prefix)+1+2*len(s))
copy(output, prefix)
delta, n, bias := int32(0), initialN, initialBias
b, remaining := int32(0), int32(0)
for _, r := range s {
if r < 0x80 {
b++
output = append(output, byte(r))
} else {
remaining++
}
}
h := b
if b > 0 {
output = append(output, '-')
}
for remaining != 0 {
m := int32(0x7fffffff)
for _, r := range s {
if m > r && r >= n {
m = r
}
}
delta += (m - n) * (h + 1)
if delta < 0 {
return "", fmt.Errorf("idna: invalid label %q", s)
}
n = m
for _, r := range s {
if r < n {
delta++
if delta < 0 {
return "", fmt.Errorf("idna: invalid label %q", s)
}
continue
}
if r > n {
continue
}
q := delta
for k := base; ; k += base {
t := k - bias
if t < tmin {
t = tmin
} else if t > tmax {
t = tmax
}
if q < t {
break
}
output = append(output, encodeDigit(t+(q-t)%(base-t)))
q = (q - t) / (base - t)
}
output = append(output, encodeDigit(q))
bias = adapt(delta, h+1, h == b)
delta = 0
h++
remaining--
}
delta++
n++
}
return string(output), nil
}
func decodeDigit(x byte) (digit int32, ok bool) {
switch {
case '0' <= x && x <= '9':
return int32(x - ('0' - 26)), true
case 'A' <= x && x <= 'Z':
return int32(x - 'A'), true
case 'a' <= x && x <= 'z':
return int32(x - 'a'), true
}
return 0, false
}
func encodeDigit(digit int32) byte {
switch {
case 0 <= digit && digit < 26:
return byte(digit + 'a')
case 26 <= digit && digit < 36:
return byte(digit + ('0' - 26))
}
panic("idna: internal error in punycode encoding")
}
// adapt is the bias adaptation function specified in section 6.1.
func adapt(delta, numPoints int32, firstTime bool) int32 {
if firstTime {
delta /= damp
} else {
delta /= 2
}
delta += delta / numPoints
k := int32(0)
for delta > ((base-tmin)*tmax)/2 {
delta /= base - tmin
k += base
}
return k + (base-tmin+1)*delta/(delta+skew)
}
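
For reference, the digit alphabet implemented by decodeDigit and encodeDigit maps the values 0–25 to 'a'–'z' and 26–35 to '0'–'9'. Since those functions are unexported, here is a standalone restatement of the same mapping, for illustration only:

```go
package main

import "fmt"

// punycodeDigit mirrors encodeDigit above: 0-25 -> 'a'-'z', 26-35 -> '0'-'9'.
func punycodeDigit(d int32) byte {
	switch {
	case 0 <= d && d < 26:
		return byte(d + 'a')
	case 26 <= d && d < 36:
		return byte(d + ('0' - 26))
	}
	panic("digit out of range")
}

func main() {
	for _, d := range []int32{0, 25, 26, 35} {
		fmt.Printf("%2d -> %c\n", d, punycodeDigit(d))
	}
	// Output:
	//  0 -> a
	// 25 -> z
	// 26 -> 0
	// 35 -> 9
}
```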

Some files were not shown because too many files have changed in this diff.