Merge pull request #9538 from hashicorp/f-nomad-provider
provider/nomad: Nomad provider for managing jobs
commit fd498fbfff
@ -0,0 +1,49 @@
package nomad

import (
    "fmt"

    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/terraform"
)

func Provider() terraform.ResourceProvider {
    return &schema.Provider{
        Schema: map[string]*schema.Schema{
            "address": &schema.Schema{
                Type:        schema.TypeString,
                Required:    true,
                DefaultFunc: schema.EnvDefaultFunc("NOMAD_ADDR", nil),
                Description: "URL of the root of the target Nomad agent.",
            },

            "region": &schema.Schema{
                Type:        schema.TypeString,
                Optional:    true,
                DefaultFunc: schema.EnvDefaultFunc("NOMAD_REGION", ""),
                Description: "Region of the target Nomad agent.",
            },
        },

        ConfigureFunc: providerConfigure,

        ResourcesMap: map[string]*schema.Resource{
            "nomad_job": resourceJob(),
        },
    }
}

func providerConfigure(d *schema.ResourceData) (interface{}, error) {
    config := &api.Config{
        Address: d.Get("address").(string),
        Region:  d.Get("region").(string),
    }

    client, err := api.NewClient(config)
    if err != nil {
        return nil, fmt.Errorf("failed to configure Nomad API: %s", err)
    }

    return client, nil
}
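For context, and not part of this diff: a schema-based provider like the one above can also be compiled as a standalone Terraform plugin binary. A minimal sketch of that wiring, assuming the conventional standalone-plugin entry point from Terraform's `plugin` package:

```go
package main

import (
    "github.com/hashicorp/terraform/builtin/providers/nomad"
    "github.com/hashicorp/terraform/plugin"
)

func main() {
    // Serve the Nomad provider over Terraform's plugin protocol.
    plugin.Serve(&plugin.ServeOpts{
        ProviderFunc: nomad.Provider,
    })
}
```

Inside Terraform core itself, the same `Provider` function is instead registered directly in the internal provider table, as shown in the `InternalProviders` hunk later in this commit.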
@ -0,0 +1,44 @@
package nomad

import (
    "os"
    "testing"

    "github.com/hashicorp/terraform/helper/schema"
    "github.com/hashicorp/terraform/terraform"
)

// How to run the acceptance tests for this provider:
//
// - Obtain an official Nomad release from https://nomadproject.io
//   and extract the "nomad" binary
//
// - Run the following to start the Nomad agent in development mode:
//       nomad agent -dev
//
// - Run the Terraform acceptance tests as usual:
//       make testacc TEST=./builtin/providers/nomad
//
// The tests expect to be run in a fresh, empty Nomad server.

func TestProvider(t *testing.T) {
    if err := Provider().(*schema.Provider).InternalValidate(); err != nil {
        t.Fatalf("err: %s", err)
    }
}

var testProvider *schema.Provider
var testProviders map[string]terraform.ResourceProvider

func init() {
    testProvider = Provider().(*schema.Provider)
    testProviders = map[string]terraform.ResourceProvider{
        "nomad": testProvider,
    }
}

func testAccPreCheck(t *testing.T) {
    if v := os.Getenv("NOMAD_ADDR"); v == "" {
        os.Setenv("NOMAD_ADDR", "http://127.0.0.1:4646")
    }
}
@ -0,0 +1,196 @@
package nomad

import (
    "bytes"
    "encoding/gob"
    "fmt"
    "log"
    "reflect"
    "strings"

    "github.com/hashicorp/nomad/api"
    "github.com/hashicorp/nomad/jobspec"
    "github.com/hashicorp/nomad/nomad/structs"
    "github.com/hashicorp/terraform/helper/schema"
)

func resourceJob() *schema.Resource {
    return &schema.Resource{
        Create: resourceJobRegister,
        Update: resourceJobRegister,
        Delete: resourceJobDeregister,
        Read:   resourceJobRead,
        Exists: resourceJobExists,

        Schema: map[string]*schema.Schema{
            "jobspec": {
                Description:      "Job specification. If you want to point to a file use the file() function.",
                Required:         true,
                Type:             schema.TypeString,
                DiffSuppressFunc: jobspecDiffSuppress,
            },

            "deregister_on_destroy": {
                Description: "If true, the job will be deregistered on destroy.",
                Optional:    true,
                Default:     true,
                Type:        schema.TypeBool,
            },

            "deregister_on_id_change": {
                Description: "If true, the job will be deregistered when the job ID changes.",
                Optional:    true,
                Default:     true,
                Type:        schema.TypeBool,
            },
        },
    }
}

func resourceJobRegister(d *schema.ResourceData, meta interface{}) error {
    client := meta.(*api.Client)

    // Get the jobspec itself
    jobspecRaw := d.Get("jobspec").(string)

    // Parse it
    jobspecStruct, err := jobspec.Parse(strings.NewReader(jobspecRaw))
    if err != nil {
        return fmt.Errorf("error parsing jobspec: %s", err)
    }

    // Initialize and validate
    jobspecStruct.Canonicalize()
    if err := jobspecStruct.Validate(); err != nil {
        return fmt.Errorf("error validating job: %v", err)
    }

    // If we have an ID and it's not equal to this jobspec, then we
    // have to deregister the old job before we register the new job.
    prevId := d.Id()
    if !d.Get("deregister_on_id_change").(bool) {
        // If we aren't deregistering on ID change, just pretend we
        // don't have a prior ID.
        prevId = ""
    }
    if prevId != "" && prevId != jobspecStruct.ID {
        log.Printf(
            "[INFO] Deregistering %q before registering %q",
            prevId, jobspecStruct.ID)

        log.Printf("[DEBUG] Deregistering job: %q", prevId)
        _, _, err := client.Jobs().Deregister(prevId, nil)
        if err != nil {
            return fmt.Errorf(
                "error deregistering previous job %q "+
                    "before registering new job %q: %s",
                prevId, jobspecStruct.ID, err)
        }

        // Success! Clear our state.
        d.SetId("")
    }

    // Convert it so that we can use it with the API
    jobspecAPI, err := convertStructJob(jobspecStruct)
    if err != nil {
        return fmt.Errorf("error converting jobspec: %s", err)
    }

    // Register the job
    _, _, err = client.Jobs().Register(jobspecAPI, nil)
    if err != nil {
        return fmt.Errorf("error applying jobspec: %s", err)
    }

    d.SetId(jobspecAPI.ID)

    return nil
}

func resourceJobDeregister(d *schema.ResourceData, meta interface{}) error {
    client := meta.(*api.Client)

    // If deregistration is disabled, then do nothing
    if !d.Get("deregister_on_destroy").(bool) {
        log.Printf(
            "[WARN] Job %q will not deregister since 'deregister_on_destroy'"+
                " is false", d.Id())
        return nil
    }

    id := d.Id()
    log.Printf("[DEBUG] Deregistering job: %q", id)
    _, _, err := client.Jobs().Deregister(id, nil)
    if err != nil {
        return fmt.Errorf("error deregistering job: %s", err)
    }

    return nil
}

func resourceJobExists(d *schema.ResourceData, meta interface{}) (bool, error) {
    client := meta.(*api.Client)

    id := d.Id()
    log.Printf("[DEBUG] Checking if job exists: %q", id)
    _, _, err := client.Jobs().Info(id, nil)
    if err != nil {
        // As of Nomad 0.4.1, the API client returns an error for 404
        // rather than a nil result, so we must check this way.
        if strings.Contains(err.Error(), "404") {
            return false, nil
        }

        return true, fmt.Errorf("error checking for job: %#v", err)
    }

    return true, nil
}

func resourceJobRead(d *schema.ResourceData, meta interface{}) error {
    // We don't do anything at the moment. Exists is used to
    // remove non-existent jobs but read doesn't have to do anything.
    return nil
}

// convertStructJob is used to take a *structs.Job and convert it to an *api.Job.
//
// This is unfortunate but it is how Nomad itself does it (this is copied
// line for line from Nomad). We'll mimic them exactly to get this done.
func convertStructJob(in *structs.Job) (*api.Job, error) {
    gob.Register([]map[string]interface{}{})
    gob.Register([]interface{}{})
    var apiJob *api.Job
    buf := new(bytes.Buffer)
    if err := gob.NewEncoder(buf).Encode(in); err != nil {
        return nil, err
    }
    if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil {
        return nil, err
    }
    return apiJob, nil
}

// jobspecDiffSuppress is the DiffSuppressFunc used by the schema to
// check if two jobspecs are equal.
func jobspecDiffSuppress(k, old, new string, d *schema.ResourceData) bool {
    // Parse the old job
    oldJob, err := jobspec.Parse(strings.NewReader(old))
    if err != nil {
        return false
    }

    // Parse the new job
    newJob, err := jobspec.Parse(strings.NewReader(new))
    if err != nil {
        return false
    }

    // Init
    oldJob.Canonicalize()
    newJob.Canonicalize()

    // Check for jobspec equality
    return reflect.DeepEqual(oldJob, newJob)
}
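An illustrative sketch (not part of the commit) of the property `jobspecDiffSuppress` relies on: two jobspecs that differ only in formatting parse and canonicalize to the same structure, so Terraform reports no diff. It assumes `jobspec.Parse` accepts these minimal specs, since validation happens separately.

```go
package main

import (
    "fmt"
    "reflect"
    "strings"

    "github.com/hashicorp/nomad/jobspec"
)

func main() {
    a := `job "foo" { datacenters = ["dc1"] }`
    b := `job "foo" {
  datacenters = ["dc1"]
}`

    // Formatting differences disappear after Parse + Canonicalize,
    // which is exactly why the DiffSuppressFunc compares parsed jobs
    // rather than raw strings.
    jobA, errA := jobspec.Parse(strings.NewReader(a))
    jobB, errB := jobspec.Parse(strings.NewReader(b))
    if errA != nil || errB != nil {
        panic("parse failed")
    }
    jobA.Canonicalize()
    jobB.Canonicalize()
    fmt.Println(reflect.DeepEqual(jobA, jobB)) // true
}
```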
@ -0,0 +1,283 @@
package nomad

import (
    "fmt"
    "strings"
    "testing"

    r "github.com/hashicorp/terraform/helper/resource"
    "github.com/hashicorp/terraform/terraform"

    "github.com/hashicorp/nomad/api"
)

func TestResourceJob_basic(t *testing.T) {
    r.Test(t, r.TestCase{
        Providers: testProviders,
        PreCheck:  func() { testAccPreCheck(t) },
        Steps: []r.TestStep{
            r.TestStep{
                Config: testResourceJob_initialConfig,
                Check:  testResourceJob_initialCheck,
            },
        },

        CheckDestroy: testResourceJob_checkDestroy("foo"),
    })
}

func TestResourceJob_refresh(t *testing.T) {
    r.Test(t, r.TestCase{
        Providers: testProviders,
        PreCheck:  func() { testAccPreCheck(t) },
        Steps: []r.TestStep{
            r.TestStep{
                Config: testResourceJob_initialConfig,
                Check:  testResourceJob_initialCheck,
            },

            // This should successfully cause the job to be recreated,
            // testing the Exists function.
            r.TestStep{
                PreConfig: testResourceJob_deregister(t, "foo"),
                Config:    testResourceJob_initialConfig,
            },
        },
    })
}

func TestResourceJob_disableDestroyDeregister(t *testing.T) {
    r.Test(t, r.TestCase{
        Providers: testProviders,
        PreCheck:  func() { testAccPreCheck(t) },
        Steps: []r.TestStep{
            r.TestStep{
                Config: testResourceJob_noDestroy,
                Check:  testResourceJob_initialCheck,
            },

            // Destroy with our setting set
            r.TestStep{
                Destroy: true,
                Config:  testResourceJob_noDestroy,
                Check:   testResourceJob_checkExists,
            },

            // Re-apply without the setting set
            r.TestStep{
                Config: testResourceJob_initialConfig,
                Check:  testResourceJob_checkExists,
            },
        },
    })
}

func TestResourceJob_idChange(t *testing.T) {
    r.Test(t, r.TestCase{
        Providers: testProviders,
        PreCheck:  func() { testAccPreCheck(t) },
        Steps: []r.TestStep{
            r.TestStep{
                Config: testResourceJob_initialConfig,
                Check:  testResourceJob_initialCheck,
            },

            // Change our ID
            r.TestStep{
                Config: testResourceJob_updateConfig,
                Check:  testResourceJob_updateCheck,
            },
        },
    })
}

var testResourceJob_initialConfig = `
resource "nomad_job" "test" {
    jobspec = <<EOT
job "foo" {
    datacenters = ["dc1"]
    type = "service"
    group "foo" {
        task "foo" {
            driver = "raw_exec"
            config {
                command = "/bin/sleep"
                args = ["1"]
            }

            resources {
                cpu = 20
                memory = 10
                disk = 100
            }

            logs {
                max_files = 3
                max_file_size = 10
            }
        }
    }
}
EOT
}
`

var testResourceJob_noDestroy = `
resource "nomad_job" "test" {
    deregister_on_destroy = false
    jobspec = <<EOT
job "foo" {
    datacenters = ["dc1"]
    type = "service"
    group "foo" {
        task "foo" {
            driver = "raw_exec"
            config {
                command = "/bin/sleep"
                args = ["1"]
            }

            resources {
                cpu = 20
                memory = 10
                disk = 100
            }

            logs {
                max_files = 3
                max_file_size = 10
            }
        }
    }
}
EOT
}
`

func testResourceJob_initialCheck(s *terraform.State) error {
    resourceState := s.Modules[0].Resources["nomad_job.test"]
    if resourceState == nil {
        return fmt.Errorf("resource not found in state")
    }

    instanceState := resourceState.Primary
    if instanceState == nil {
        return fmt.Errorf("resource has no primary instance")
    }

    jobID := instanceState.ID

    client := testProvider.Meta().(*api.Client)
    job, _, err := client.Jobs().Info(jobID, nil)
    if err != nil {
        return fmt.Errorf("error reading back job: %s", err)
    }

    if got, want := job.ID, jobID; got != want {
        return fmt.Errorf("jobID is %q; want %q", got, want)
    }

    return nil
}

func testResourceJob_checkExists(s *terraform.State) error {
    jobID := "foo"

    client := testProvider.Meta().(*api.Client)
    _, _, err := client.Jobs().Info(jobID, nil)
    if err != nil {
        return fmt.Errorf("error reading back job: %s", err)
    }

    return nil
}

func testResourceJob_checkDestroy(jobID string) r.TestCheckFunc {
    return func(*terraform.State) error {
        client := testProvider.Meta().(*api.Client)
        _, _, err := client.Jobs().Info(jobID, nil)
        if err != nil && strings.Contains(err.Error(), "404") {
            return nil
        }
        if err == nil {
            err = fmt.Errorf("not destroyed")
        }

        return err
    }
}

func testResourceJob_deregister(t *testing.T, jobID string) func() {
    return func() {
        client := testProvider.Meta().(*api.Client)
        _, _, err := client.Jobs().Deregister(jobID, nil)
        if err != nil {
            t.Fatalf("error deregistering job: %s", err)
        }
    }
}

var testResourceJob_updateConfig = `
resource "nomad_job" "test" {
    jobspec = <<EOT
job "bar" {
    datacenters = ["dc1"]
    type = "service"
    group "foo" {
        task "foo" {
            driver = "raw_exec"
            config {
                command = "/bin/sleep"
                args = ["1"]
            }

            resources {
                cpu = 20
                memory = 10
                disk = 100
            }

            logs {
                max_files = 3
                max_file_size = 10
            }
        }
    }
}
EOT
}
`

func testResourceJob_updateCheck(s *terraform.State) error {
    resourceState := s.Modules[0].Resources["nomad_job.test"]
    if resourceState == nil {
        return fmt.Errorf("resource not found in state")
    }

    instanceState := resourceState.Primary
    if instanceState == nil {
        return fmt.Errorf("resource has no primary instance")
    }

    jobID := instanceState.ID

    client := testProvider.Meta().(*api.Client)
    job, _, err := client.Jobs().Info(jobID, nil)
    if err != nil {
        return fmt.Errorf("error reading back job: %s", err)
    }

    if got, want := job.ID, jobID; got != want {
        return fmt.Errorf("jobID is %q; want %q", got, want)
    }

    {
        // Verify foo doesn't exist
        _, _, err := client.Jobs().Info("foo", nil)
        if err == nil {
            return fmt.Errorf("reading foo success")
        }
    }

    return nil
}
@ -34,6 +34,7 @@ import (
    logentriesprovider "github.com/hashicorp/terraform/builtin/providers/logentries"
    mailgunprovider "github.com/hashicorp/terraform/builtin/providers/mailgun"
    mysqlprovider "github.com/hashicorp/terraform/builtin/providers/mysql"
    nomadprovider "github.com/hashicorp/terraform/builtin/providers/nomad"
    nullprovider "github.com/hashicorp/terraform/builtin/providers/null"
    openstackprovider "github.com/hashicorp/terraform/builtin/providers/openstack"
    packetprovider "github.com/hashicorp/terraform/builtin/providers/packet"
@ -93,6 +94,7 @@ var InternalProviders = map[string]plugin.ProviderFunc{
    "logentries": logentriesprovider.Provider,
    "mailgun":    mailgunprovider.Provider,
    "mysql":      mysqlprovider.Provider,
    "nomad":      nomadprovider.Provider,
    "null":       nullprovider.Provider,
    "openstack":  openstackprovider.Provider,
    "packet":     packetprovider.Provider,
@ -0,0 +1,66 @@
# 0.10.0

* feature: Add a test hook (#180)
* feature: `ParseLevel` is now case-insensitive (#326)
* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
* performance: avoid re-allocations on `WithFields` (#335)

# 0.9.0

* logrus/text_formatter: don't emit empty msg
* logrus/hooks/airbrake: move out of main repository
* logrus/hooks/sentry: move out of main repository
* logrus/hooks/papertrail: move out of main repository
* logrus/hooks/bugsnag: move out of main repository
* logrus/core: run tests with `-race`
* logrus/core: detect TTY based on `stderr`
* logrus/core: support `WithError` on logger
* logrus/core: Solaris support

# 0.8.7

* logrus/core: fix possible race (#216)
* logrus/doc: small typo fixes and doc improvements

# 0.8.6

* hooks/raven: allow passing an initialized client

# 0.8.5

* logrus/core: revert #208

# 0.8.4

* formatter/text: fix data race (#218)

# 0.8.3

* logrus/core: fix entry log level (#208)
* logrus/core: improve performance of text formatter by 40%
* logrus/core: expose `LevelHooks` type
* logrus/core: add support for DragonflyBSD and NetBSD
* formatter/text: print structs more verbosely

# 0.8.2

* logrus: fix more Fatal family functions

# 0.8.1

* logrus: fix not exiting on `Fatalf` and `Fatalln`

# 0.8.0

* logrus: defaults to stderr instead of stdout
* hooks/sentry: add special field for `*http.Request`
* formatter/text: ignore Windows for colors

# 0.7.3

* formatter/\*: allow configuration of timestamp layout

# 0.7.2

* formatter/text: Add configuration option for time format (#158)
@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,388 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus)

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

![Colored](http://i.imgur.com/PY7qMwd.png)

With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:

```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}

{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}

{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```

With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
[logfmt](http://godoc.org/github.com/kr/logfmt) format:

```text
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
exit status 1
```

#### Example

The simplest way to use Logrus is simply the package-level exported logger:

```go
package main

import (
    log "github.com/Sirupsen/logrus"
)

func main() {
    log.WithFields(log.Fields{
        "animal": "walrus",
    }).Info("A walrus appears")
}
```

Note that it's completely api-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:

```go
package main

import (
    "os"
    log "github.com/Sirupsen/logrus"
)

func init() {
    // Log as JSON instead of the default ASCII formatter.
    log.SetFormatter(&log.JSONFormatter{})

    // Output to stderr instead of stdout, could also be a file.
    log.SetOutput(os.Stderr)

    // Only log the warning severity or above.
    log.SetLevel(log.WarnLevel)
}

func main() {
    log.WithFields(log.Fields{
        "animal": "walrus",
        "size":   10,
    }).Info("A group of walrus emerges from the ocean")

    log.WithFields(log.Fields{
        "omg":    true,
        "number": 122,
    }).Warn("The group's number increased tremendously!")

    log.WithFields(log.Fields{
        "omg":    true,
        "number": 100,
    }).Fatal("The ice breaks!")

    // A common pattern is to re-use fields between logging statements by re-using
    // the logrus.Entry returned from WithFields()
    contextLogger := log.WithFields(log.Fields{
        "common": "this is a common field",
        "other":  "I also should be logged always",
    })

    contextLogger.Info("I'll be logged with common and other field")
    contextLogger.Info("Me too")
}
```

For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:

```go
package main

import (
    "os"

    "github.com/Sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()

func main() {
    // The API for setting attributes is a little different than the package level
    // exported logger. See Godoc.
    log.Out = os.Stderr

    log.WithFields(logrus.Fields{
        "animal": "walrus",
        "size":   10,
    }).Info("A group of walrus emerges from the ocean")
}
```

#### Fields

Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:

```go
log.WithFields(log.Fields{
    "event": event,
    "topic": topic,
    "key":   key,
}).Fatal("Failed to send event")
```

We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.

In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.

#### Hooks

You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.

Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
`init`:

```go
import (
    log "github.com/Sirupsen/logrus"
    "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake"
    logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
    "log/syslog"
)

func init() {

    // Use the Airbrake hook to report errors that have Error severity or above to
    // an exception tracker. You can create custom hooks, see the Hooks section.
    log.AddHook(airbrake.NewHook(123, "xyz", "production"))

    hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
    if err != nil {
        log.Error("Unable to connect to local syslog daemon")
    } else {
        log.AddHook(hook)
    }
}
```

Note: the Syslog hook also supports connecting to local syslog (e.g. "/dev/log", "/var/run/syslog" or "/var/run/log"). For details, please check the [syslog hook README](hooks/syslog/README.md).

| Hook  | Description |
| ----- | ----------- |
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch |

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`, then it will only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.

#### Entries

Besides the fields added with `WithField` or `WithFields` some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
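For example (an illustrative sketch, not in the upstream README), a single logging call carries all three automatic fields alongside the user-supplied ones:

```go
log.WithFields(log.Fields{"animal": "walrus"}).Warn("The group's number increased")
// With the text formatter this renders roughly as:
// time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased" animal=walrus
```
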
#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment you
could do:

```go
import (
    log "github.com/Sirupsen/logrus"
)

func init() {
    // do something here to set environment depending on an environment variable
    // or command-line flag
    if Environment == "production" {
        log.SetFormatter(&log.JSONFormatter{})
    } else {
        // The TextFormatter is default, you don't actually have to do this.
        log.SetFormatter(&log.TextFormatter{})
    }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY set the
    `DisableColors` field to `true`
* `logrus.JSONFormatter`. Logs fields as JSON.
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.

```go
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
```

Third party logging formatters:

* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):

```go
type MyJSONFormatter struct {
}

log.SetFormatter(new(MyJSONFormatter))

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
    // Note this doesn't include Time, Level and Message which are available on
    // the Entry. Consult `godoc` on information about those fields or read the
    // source of the official loggers.
    serialized, err := json.Marshal(entry.Data)
    if err != nil {
        return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
    }
    return append(serialized, '\n'), nil
}
```

#### Logger as an `io.Writer`

Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.

```go
w := logger.Writer()
defer w.Close()

srv := http.Server{
    // create a stdlib log.Logger that writes to
    // logrus.Logger.
    ErrorLog: log.New(w, "", 0),
}
```

Each line written to that writer will be printed the usual way, using formatters
and hooks. The level for those entries is `info`.

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.

#### Tools

| Tool | Description |
| ---- | ----------- |
| [Logrus Mate](https://github.com/gogap/logrus_mate) | Logrus Mate is a tool for Logrus to manage loggers: you can initialize a logger's level, hook and formatter from a config file, so the logger is generated with a different config in each environment. |

#### Testing

Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:

* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):

```go
logger, hook := NewNullLogger()
logger.Error("Hello error")

assert.Equal(1, len(hook.Entries))
assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level)
assert.Equal("Hello error", hook.LastEntry().Message)

hook.Reset()
assert.Nil(hook.LastEntry())
```
@ -0,0 +1,26 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.


The simplest way to use Logrus is simply the package-level exported logger:

    package main

    import (
        log "github.com/Sirupsen/logrus"
    )

    func main() {
        log.WithFields(log.Fields{
            "animal": "walrus",
            "number": 1,
            "size":   10,
        }).Info("A walrus appears")
    }

Output:
    time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10

For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus
@ -0,0 +1,264 @@
package logrus

import (
    "bytes"
    "fmt"
    "io"
    "os"
    "time"
)

// Defines the key when adding errors using WithError.
var ErrorKey = "error"

// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
    Logger *Logger

    // Contains all the fields set by the user.
    Data Fields

    // Time at which the log entry was created
    Time time.Time

    // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
    Level Level

    // Message passed to Debug, Info, Warn, Error, Fatal or Panic
    Message string
}

func NewEntry(logger *Logger) *Entry {
    return &Entry{
        Logger: logger,
        // Default is three fields, give a little extra room
        Data: make(Fields, 5),
    }
}

// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
    serialized, err := entry.Logger.Formatter.Format(entry)
    return bytes.NewBuffer(serialized), err
}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
    reader, err := entry.Reader()
    if err != nil {
        return "", err
    }

    return reader.String(), err
}

// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
    return entry.WithField(ErrorKey, err)
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
    return entry.WithFields(Fields{key: value})
}

// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
    data := make(Fields, len(entry.Data)+len(fields))
    for k, v := range entry.Data {
        data[k] = v
    }
    for k, v := range fields {
        data[k] = v
    }
    return &Entry{Logger: entry.Logger, Data: data}
}

// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
    entry.Time = time.Now()
    entry.Level = level
    entry.Message = msg

    if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
        entry.Logger.mu.Lock()
        fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
        entry.Logger.mu.Unlock()
    }

    reader, err := entry.Reader()
    if err != nil {
        entry.Logger.mu.Lock()
        fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
        entry.Logger.mu.Unlock()
    }

    entry.Logger.mu.Lock()
    defer entry.Logger.mu.Unlock()

    _, err = io.Copy(entry.Logger.Out, reader)
    if err != nil {
        fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
    }

    // To avoid Entry#log() returning a value that only would make sense for
    // panic() to use in Entry#Panic(), we avoid the allocation by checking
    // directly here.
    if level <= PanicLevel {
        panic(&entry)
    }
}

func (entry *Entry) Debug(args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.log(DebugLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Print(args ...interface{}) {
    entry.Info(args...)
}

func (entry *Entry) Info(args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.log(InfoLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Warn(args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.log(WarnLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Warning(args ...interface{}) {
    entry.Warn(args...)
}

func (entry *Entry) Error(args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.log(ErrorLevel, fmt.Sprint(args...))
    }
}

func (entry *Entry) Fatal(args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.log(FatalLevel, fmt.Sprint(args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panic(args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.log(PanicLevel, fmt.Sprint(args...))
    }
    panic(fmt.Sprint(args...))
}

// Entry Printf family functions

func (entry *Entry) Debugf(format string, args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.Debug(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Infof(format string, args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.Info(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Printf(format string, args ...interface{}) {
    entry.Infof(format, args...)
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.Warn(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Warningf(format string, args ...interface{}) {
    entry.Warnf(format, args...)
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.Error(fmt.Sprintf(format, args...))
    }
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.Fatal(fmt.Sprintf(format, args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.Panic(fmt.Sprintf(format, args...))
    }
}

// Entry Println family functions

func (entry *Entry) Debugln(args ...interface{}) {
    if entry.Logger.Level >= DebugLevel {
        entry.Debug(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Infoln(args ...interface{}) {
    if entry.Logger.Level >= InfoLevel {
        entry.Info(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Println(args ...interface{}) {
    entry.Infoln(args...)
}

func (entry *Entry) Warnln(args ...interface{}) {
    if entry.Logger.Level >= WarnLevel {
        entry.Warn(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Warningln(args ...interface{}) {
    entry.Warnln(args...)
}

func (entry *Entry) Errorln(args ...interface{}) {
    if entry.Logger.Level >= ErrorLevel {
        entry.Error(entry.sprintlnn(args...))
    }
}

func (entry *Entry) Fatalln(args ...interface{}) {
    if entry.Logger.Level >= FatalLevel {
        entry.Fatal(entry.sprintlnn(args...))
    }
    os.Exit(1)
}

func (entry *Entry) Panicln(args ...interface{}) {
    if entry.Logger.Level >= PanicLevel {
        entry.Panic(entry.sprintlnn(args...))
    }
}

// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
    msg := fmt.Sprintln(args...)
    return msg[:len(msg)-1]
}
@ -0,0 +1,193 @@
|
||||||
|
package logrus
|
||||||
|
|
||||||
|
import (
|
||||||
|
"io"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// std is the name of the standard logger in stdlib `log`
|
||||||
|
std = New()
|
||||||
|
)
|
||||||
|
|
||||||
|
func StandardLogger() *Logger {
	return std
}

// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	std.mu.Lock()
	defer std.mu.Unlock()
	return std.Level
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Hooks.Add(hook)
}

// WithError creates an entry from the standard logger and adds an error to it,
// using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
	return std.WithField(ErrorKey, err)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}
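
// A minimal usage sketch of the package-level helpers above (illustrative
// only, not part of the vendored source; the github.com/Sirupsen/logrus
// import path is assumed):
//
//	package main
//
//	import log "github.com/Sirupsen/logrus"
//
//	func main() {
//		log.SetLevel(log.DebugLevel)           // route Debug and above to Out
//		log.SetFormatter(&log.JSONFormatter{}) // swap out the default TextFormatter
//		log.WithFields(log.Fields{
//			"driver": "nomad",
//			"jobs":   3,
//		}).Info("provider configured")
//	}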
@ -0,0 +1,48 @@
package logrus

import "time"

const DefaultTimestampFormat = time.RFC3339

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ...
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}

// This is to avoid silently overwriting the `time`, `msg` and `level` fields
// when dumping an entry. Without this code,
//
//	logrus.WithField("level", 1).Info("hello")
//
// would silently drop the user-provided level. With it, the entry is instead
// logged as:
//
//	{"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's
// here to avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
	_, ok := data["time"]
	if ok {
		data["fields.time"] = data["time"]
	}

	_, ok = data["msg"]
	if ok {
		data["fields.msg"] = data["msg"]
	}

	_, ok = data["level"]
	if ok {
		data["fields.level"] = data["level"]
	}
}
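
// An illustrative custom Formatter (a sketch, not part of the vendored
// source; assumes the package is importable as logrus): it renders entries
// as "LEVEL: message" and drops the fields entirely.
//
//	type PlainFormatter struct{}
//
//	func (f *PlainFormatter) Format(entry *logrus.Entry) ([]byte, error) {
//		return []byte(fmt.Sprintf("%s: %s\n",
//			strings.ToUpper(entry.Level.String()), entry.Message)), nil
//	}
//
//	// Usage: logger := logrus.New(); logger.Formatter = &PlainFormatter{}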
@ -0,0 +1,34 @@
package logrus

// A Hook is fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that hooks are not
// fired in a goroutine or through a channel with workers; if your hook blocks
// and you don't want the logging calls for the levels returned from `Levels()`
// to block, handle that concurrency yourself.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}
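
// An illustrative Hook implementation (a sketch, not part of the vendored
// source; assumes the package is importable as logrus): it counts entries at
// Error level and above, and its Fire never fails.
//
//	type ErrorCounterHook struct{ count int }
//
//	func (h *ErrorCounterHook) Levels() []logrus.Level {
//		return []logrus.Level{logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel}
//	}
//
//	func (h *ErrorCounterHook) Fire(entry *logrus.Entry) error {
//		h.count++
//		return nil
//	}
//
//	// Usage: logger.Hooks.Add(&ErrorCounterHook{}); logger.Error("boom")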
@ -0,0 +1,41 @@
package logrus

import (
	"encoding/json"
	"fmt"
)

type JSONFormatter struct {
	// TimestampFormat sets the format used for marshaling timestamps.
	TimestampFormat string
}

func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
	data := make(Fields, len(entry.Data)+3)
	for k, v := range entry.Data {
		switch v := v.(type) {
		case error:
			// Otherwise errors are ignored by `encoding/json`
			// https://github.com/Sirupsen/logrus/issues/137
			data[k] = v.Error()
		default:
			data[k] = v
		}
	}
	prefixFieldClashes(data)

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}

	data["time"] = entry.Time.Format(timestampFormat)
	data["msg"] = entry.Message
	data["level"] = entry.Level.String()

	serialized, err := json.Marshal(data)
	if err != nil {
		return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
	}
	return append(serialized, '\n'), nil
}
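
// Illustrative use of the TimestampFormat knob above (a sketch, not part of
// the vendored source): any layout accepted by time.Format may be supplied;
// an empty string falls back to DefaultTimestampFormat (RFC3339).
//
//	logger := logrus.New()
//	logger.Formatter = &logrus.JSONFormatter{TimestampFormat: time.RFC1123}
//	logger.WithField("component", "scheduler").Info("started")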
@ -0,0 +1,212 @@
package logrus

import (
	"io"
	"os"
	"sync"
)

type Logger struct {
	// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
	// file, or leave it default which is `os.Stderr`. You can also set this to
	// something more adventurous, such as logging to Kafka.
	Out io.Writer
	// Hooks for the logger instance. These allow firing events based on logging
	// levels and log entries. For example, to send errors to an error tracking
	// service, log to StatsD or dump the core on fatal errors.
	Hooks LevelHooks
	// All log entries pass through the formatter before logged to Out. The
	// included formatters are `TextFormatter` and `JSONFormatter` for which
	// TextFormatter is the default. In development (when a TTY is attached) it
	// logs with colors, but not when logging to a file. You can easily implement
	// your own that implements the `Formatter` interface, see the `README` or
	// included formatters for examples.
	Formatter Formatter
	// The logging level the logger should log at. This is typically (and defaults
	// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
	// logged. `logrus.Debug` is useful in development.
	Level Level
	// Used to sync writing to the log.
	mu sync.Mutex
}

// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
//	var log = &Logger{
//		Out:       os.Stderr,
//		Formatter: new(JSONFormatter),
//		Hooks:     make(LevelHooks),
//		Level:     logrus.DebugLevel,
//	}
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
	return &Logger{
		Out:       os.Stderr,
		Formatter: new(TextFormatter),
		Hooks:     make(LevelHooks),
		Level:     InfoLevel,
	}
}

// Adds a field to the log entry; note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
	return NewEntry(logger).WithField(key, value)
}

// Adds a struct of fields to the log entry. All it does is call `WithField`
// for each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
	return NewEntry(logger).WithFields(fields)
}

// Add an error as a single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
	return NewEntry(logger).WithError(err)
}

func (logger *Logger) Debugf(format string, args ...interface{}) {
	if logger.Level >= DebugLevel {
		NewEntry(logger).Debugf(format, args...)
	}
}

func (logger *Logger) Infof(format string, args ...interface{}) {
	if logger.Level >= InfoLevel {
		NewEntry(logger).Infof(format, args...)
	}
}

func (logger *Logger) Printf(format string, args ...interface{}) {
	NewEntry(logger).Printf(format, args...)
}

func (logger *Logger) Warnf(format string, args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warnf(format, args...)
	}
}

func (logger *Logger) Warningf(format string, args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warnf(format, args...)
	}
}

func (logger *Logger) Errorf(format string, args ...interface{}) {
	if logger.Level >= ErrorLevel {
		NewEntry(logger).Errorf(format, args...)
	}
}

func (logger *Logger) Fatalf(format string, args ...interface{}) {
	if logger.Level >= FatalLevel {
		NewEntry(logger).Fatalf(format, args...)
	}
	os.Exit(1)
}

func (logger *Logger) Panicf(format string, args ...interface{}) {
	if logger.Level >= PanicLevel {
		NewEntry(logger).Panicf(format, args...)
	}
}

func (logger *Logger) Debug(args ...interface{}) {
	if logger.Level >= DebugLevel {
		NewEntry(logger).Debug(args...)
	}
}

func (logger *Logger) Info(args ...interface{}) {
	if logger.Level >= InfoLevel {
		NewEntry(logger).Info(args...)
	}
}

func (logger *Logger) Print(args ...interface{}) {
	NewEntry(logger).Info(args...)
}

func (logger *Logger) Warn(args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warn(args...)
	}
}

func (logger *Logger) Warning(args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warn(args...)
	}
}

func (logger *Logger) Error(args ...interface{}) {
	if logger.Level >= ErrorLevel {
		NewEntry(logger).Error(args...)
	}
}

func (logger *Logger) Fatal(args ...interface{}) {
	if logger.Level >= FatalLevel {
		NewEntry(logger).Fatal(args...)
	}
	os.Exit(1)
}

func (logger *Logger) Panic(args ...interface{}) {
	if logger.Level >= PanicLevel {
		NewEntry(logger).Panic(args...)
	}
}

func (logger *Logger) Debugln(args ...interface{}) {
	if logger.Level >= DebugLevel {
		NewEntry(logger).Debugln(args...)
	}
}

func (logger *Logger) Infoln(args ...interface{}) {
	if logger.Level >= InfoLevel {
		NewEntry(logger).Infoln(args...)
	}
}

func (logger *Logger) Println(args ...interface{}) {
	NewEntry(logger).Println(args...)
}

func (logger *Logger) Warnln(args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warnln(args...)
	}
}

func (logger *Logger) Warningln(args ...interface{}) {
	if logger.Level >= WarnLevel {
		NewEntry(logger).Warnln(args...)
	}
}

func (logger *Logger) Errorln(args ...interface{}) {
	if logger.Level >= ErrorLevel {
		NewEntry(logger).Errorln(args...)
	}
}

func (logger *Logger) Fatalln(args ...interface{}) {
	if logger.Level >= FatalLevel {
		NewEntry(logger).Fatalln(args...)
	}
	os.Exit(1)
}

func (logger *Logger) Panicln(args ...interface{}) {
	if logger.Level >= PanicLevel {
		NewEntry(logger).Panicln(args...)
	}
}
@ -0,0 +1,143 @@
package logrus

import (
	"fmt"
	"log"
	"strings"
)

// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}

// Level type
type Level uint8

// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
	switch level {
	case DebugLevel:
		return "debug"
	case InfoLevel:
		return "info"
	case WarnLevel:
		return "warning"
	case ErrorLevel:
		return "error"
	case FatalLevel:
		return "fatal"
	case PanicLevel:
		return "panic"
	}

	return "unknown"
}

// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
	switch strings.ToLower(lvl) {
	case "panic":
		return PanicLevel, nil
	case "fatal":
		return FatalLevel, nil
	case "error":
		return ErrorLevel, nil
	case "warn", "warning":
		return WarnLevel, nil
	case "info":
		return InfoLevel, nil
	case "debug":
		return DebugLevel, nil
	}

	var l Level
	return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}

// AllLevels exposes all logging levels.
var AllLevels = []Level{
	PanicLevel,
	FatalLevel,
	ErrorLevel,
	WarnLevel,
	InfoLevel,
	DebugLevel,
}

// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
	// PanicLevel level, highest level of severity. Logs and then calls panic with the
	// message passed to Debug, Info, ...
	PanicLevel Level = iota
	// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
	// logging level is set to Panic.
	FatalLevel
	// ErrorLevel level. Logs. Used for errors that should definitely be noted.
	// Commonly used for hooks to send errors to an error tracking service.
	ErrorLevel
	// WarnLevel level. Non-critical entries that deserve eyes.
	WarnLevel
	// InfoLevel level. General operational entries about what's going on inside the
	// application.
	InfoLevel
	// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
	DebugLevel
)

// Won't compile if StdLogger can't be realized by a log.Logger
var (
	_ StdLogger = &log.Logger{}
	_ StdLogger = &Entry{}
	_ StdLogger = &Logger{}
)

// StdLogger is what your logrus-enabled library should take: that way it will
// accept both a stdlib logger and a logrus logger. There's no standard
// interface; this is the closest we get, unfortunately.
type StdLogger interface {
	Print(...interface{})
	Printf(string, ...interface{})
	Println(...interface{})

	Fatal(...interface{})
	Fatalf(string, ...interface{})
	Fatalln(...interface{})

	Panic(...interface{})
	Panicf(string, ...interface{})
	Panicln(...interface{})
}

// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
	WithField(key string, value interface{}) *Entry
	WithFields(fields Fields) *Entry
	WithError(err error) *Entry

	Debugf(format string, args ...interface{})
	Infof(format string, args ...interface{})
	Printf(format string, args ...interface{})
	Warnf(format string, args ...interface{})
	Warningf(format string, args ...interface{})
	Errorf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Panicf(format string, args ...interface{})

	Debug(args ...interface{})
	Info(args ...interface{})
	Print(args ...interface{})
	Warn(args ...interface{})
	Warning(args ...interface{})
	Error(args ...interface{})
	Fatal(args ...interface{})
	Panic(args ...interface{})

	Debugln(args ...interface{})
	Infoln(args ...interface{})
	Println(args ...interface{})
	Warnln(args ...interface{})
	Warningln(args ...interface{})
	Errorln(args ...interface{})
	Fatalln(args ...interface{})
	Panicln(args ...interface{})
}
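
// Illustrative sketch of wiring a configuration string to a Level via
// ParseLevel (not part of the vendored source):
//
//	lvl, err := logrus.ParseLevel("warn") // "warn" and "warning" both map to WarnLevel
//	if err != nil {
//		log.Fatal(err)
//	}
//	logger := logrus.New()
//	logger.Level = lvl
//	logger.Info("suppressed") // below WarnLevel, not emitted
//	logger.Warn("emitted")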
@ -0,0 +1,9 @@
// +build darwin freebsd openbsd netbsd dragonfly

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
@ -0,0 +1,12 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
@ -0,0 +1,21 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux darwin freebsd openbsd netbsd dragonfly

package logrus

import (
	"syscall"
	"unsafe"
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stderr
	var termios Termios
	_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
	return err == 0
}
@ -0,0 +1,15 @@
// +build solaris

package logrus

import (
	"os"

	"golang.org/x/sys/unix"
)

// IsTerminal returns true if stdout's file descriptor is a terminal. (Note
// that, unlike the other platforms, this implementation checks stdout rather
// than stderr.)
func IsTerminal() bool {
	_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
	return err == nil
}
@ -0,0 +1,27 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package logrus

import (
	"syscall"
	"unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
	procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
	fd := syscall.Stderr
	var st uint32
	r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
	return r != 0 && e == 0
}
@ -0,0 +1,161 @@
package logrus

import (
	"bytes"
	"fmt"
	"runtime"
	"sort"
	"strings"
	"time"
)

const (
	nocolor = 0
	red     = 31
	green   = 32
	yellow  = 33
	blue    = 34
	gray    = 37
)

var (
	baseTimestamp time.Time
	isTerminal    bool
)

func init() {
	baseTimestamp = time.Now()
	isTerminal = IsTerminal()
}

func miniTS() int {
	return int(time.Since(baseTimestamp) / time.Second)
}

type TextFormatter struct {
	// Set to true to bypass checking for a TTY before outputting colors.
	ForceColors bool

	// Force disabling colors.
	DisableColors bool

	// Disable timestamp logging. Useful when output is redirected to a logging
	// system that already adds timestamps.
	DisableTimestamp bool

	// Enable logging the full timestamp when a TTY is attached instead of just
	// the time passed since beginning of execution.
	FullTimestamp bool

	// TimestampFormat to use for display when a full timestamp is printed.
	TimestampFormat string

	// The fields are sorted by default for a consistent output. For applications
	// that log extremely frequently and don't use the JSON formatter this may not
	// be desired.
	DisableSorting bool
}

func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
	keys := make([]string, 0, len(entry.Data))
	for k := range entry.Data {
		keys = append(keys, k)
	}

	if !f.DisableSorting {
		sort.Strings(keys)
	}

	b := &bytes.Buffer{}

	prefixFieldClashes(entry.Data)

	isColorTerminal := isTerminal && (runtime.GOOS != "windows")
	isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors

	timestampFormat := f.TimestampFormat
	if timestampFormat == "" {
		timestampFormat = DefaultTimestampFormat
	}
	if isColored {
		f.printColored(b, entry, keys, timestampFormat)
	} else {
		if !f.DisableTimestamp {
			f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
		}
		f.appendKeyValue(b, "level", entry.Level.String())
		if entry.Message != "" {
			f.appendKeyValue(b, "msg", entry.Message)
		}
		for _, key := range keys {
			f.appendKeyValue(b, key, entry.Data[key])
		}
	}

	b.WriteByte('\n')
	return b.Bytes(), nil
}

func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
	var levelColor int
	switch entry.Level {
	case DebugLevel:
		levelColor = gray
	case WarnLevel:
		levelColor = yellow
	case ErrorLevel, FatalLevel, PanicLevel:
		levelColor = red
	default:
		levelColor = blue
	}

	levelText := strings.ToUpper(entry.Level.String())[0:4]

	if !f.FullTimestamp {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
	} else {
		fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
	}
	for _, k := range keys {
		v := entry.Data[k]
		fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
	}
}

// Note: despite its name, needsQuoting returns true when every character is
// alphanumeric, '-' or '.', i.e. when the value is safe to write *without*
// quotes; callers quote the value when it returns false.
func needsQuoting(text string) bool {
	for _, ch := range text {
		if !((ch >= 'a' && ch <= 'z') ||
			(ch >= 'A' && ch <= 'Z') ||
			(ch >= '0' && ch <= '9') ||
			ch == '-' || ch == '.') {
			return false
		}
	}
	return true
}

func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
	b.WriteString(key)
	b.WriteByte('=')

	switch value := value.(type) {
	case string:
		if needsQuoting(value) {
			b.WriteString(value)
		} else {
			fmt.Fprintf(b, "%q", value)
		}
	case error:
		errmsg := value.Error()
		if needsQuoting(errmsg) {
			b.WriteString(errmsg)
		} else {
			fmt.Fprintf(b, "%q", value)
		}
	default:
		fmt.Fprint(b, value)
	}

	b.WriteByte(' ')
}
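
// Illustrative TextFormatter configuration (a sketch, not part of the
// vendored source): force the plain key=value form with wall-clock
// timestamps even on a TTY.
//
//	logger := logrus.New()
//	logger.Formatter = &logrus.TextFormatter{
//		FullTimestamp: true, // print entry.Time, not seconds since start
//		DisableColors: true, // skip the colored layout entirely
//	}
//	logger.WithField("addr", "127.0.0.1:4646").Info("agent reachable")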
@ -0,0 +1,31 @@
package logrus

import (
	"bufio"
	"io"
	"runtime"
)

// Writer returns the write end of a pipe; anything written to it is scanned
// line by line and logged at level Info on the logger.
func (logger *Logger) Writer() *io.PipeWriter {
	reader, writer := io.Pipe()

	go logger.writerScanner(reader)
	runtime.SetFinalizer(writer, writerFinalizer)

	return writer
}

// writerScanner logs each line read from the pipe until the pipe is closed or
// a read error occurs.
func (logger *Logger) writerScanner(reader *io.PipeReader) {
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		logger.Print(scanner.Text())
	}
	if err := scanner.Err(); err != nil {
		logger.Errorf("Error while reading from Writer: %s", err)
	}
	reader.Close()
}

// writerFinalizer closes the pipe if the caller drops the writer without
// closing it.
func writerFinalizer(writer *io.PipeWriter) {
	writer.Close()
}
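
// Illustrative use of Writer (a sketch, not part of the vendored source):
// each line written to the pipe becomes one entry, which makes it handy as an
// output for the stdlib logger or a subprocess.
//
//	logger := logrus.New()
//	w := logger.Writer()
//	defer w.Close()
//
//	stdlog := log.New(w, "", 0) // stdlib "log" package
//	stdlog.Println("routed through logrus")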
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Stack Exchange

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,4 @@
wmi
===

Package wmi provides a WQL interface for WMI on Windows.
@ -0,0 +1,416 @@
// +build windows

/*
Package wmi provides a WQL interface for WMI on Windows.

Example code to print names of running processes:

	type Win32_Process struct {
		Name string
	}

	func main() {
		var dst []Win32_Process
		q := wmi.CreateQuery(&dst, "")
		err := wmi.Query(q, &dst)
		if err != nil {
			log.Fatal(err)
		}
		for i, v := range dst {
			println(i, v.Name)
		}
	}

*/
package wmi

import (
	"bytes"
	"errors"
	"fmt"
	"log"
	"os"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-ole/go-ole"
	"github.com/go-ole/go-ole/oleutil"
)

var l = log.New(os.Stdout, "", log.LstdFlags)

var (
	ErrInvalidEntityType = errors.New("wmi: invalid entity type")
	lock                 sync.Mutex
)

// QueryNamespace invokes Query with the given namespace on the local machine.
func QueryNamespace(query string, dst interface{}, namespace string) error {
	return Query(query, dst, nil, namespace)
}

// Query runs the WQL query and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
//
// Query is a wrapper around DefaultClient.Query.
func Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
	return DefaultClient.Query(query, dst, connectServerArgs...)
}

// A Client is a WMI query client.
//
// Its zero value (DefaultClient) is a usable client.
type Client struct {
	// NonePtrZero specifies if nil values for fields which aren't pointers
	// should be returned as the field type's zero value.
	//
	// Setting this to true allows structs without pointer fields to be used
	// without the risk of failure should a nil value be returned from WMI.
	NonePtrZero bool

	// PtrNil specifies if nil values for pointer fields should be returned
	// as nil.
	//
	// Setting this to true will set pointer fields to nil where WMI
	// returned nil, otherwise the type's zero value will be returned.
	PtrNil bool

	// AllowMissingFields specifies that struct fields not present in the
	// query result should not result in an error.
	//
	// Setting this to true allows custom queries to be used with full
	// struct definitions instead of having to define multiple structs.
	AllowMissingFields bool
}

// DefaultClient is the default Client and is used by Query and QueryNamespace.
var DefaultClient = &Client{}

// Query runs the WQL query and appends the values to dst.
//
// dst must have type *[]S or *[]*S, for some struct type S. Fields selected in
// the query must have the same name in dst. Supported types are all signed and
// unsigned integers, time.Time, string, bool, or a pointer to one of those.
// Array types are not supported.
//
// By default, the local machine and default namespace are used. These can be
// changed using connectServerArgs. See
// http://msdn.microsoft.com/en-us/library/aa393720.aspx for details.
func (c *Client) Query(query string, dst interface{}, connectServerArgs ...interface{}) error {
	dv := reflect.ValueOf(dst)
	if dv.Kind() != reflect.Ptr || dv.IsNil() {
		return ErrInvalidEntityType
	}
	dv = dv.Elem()
	mat, elemType := checkMultiArg(dv)
	if mat == multiArgTypeInvalid {
		return ErrInvalidEntityType
	}

	lock.Lock()
	defer lock.Unlock()
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	err := ole.CoInitializeEx(0, ole.COINIT_MULTITHREADED)
	if err != nil {
		oleerr := err.(*ole.OleError)
		// S_FALSE = 0x00000001 // CoInitializeEx was already called on this thread
		if oleerr.Code() != ole.S_OK && oleerr.Code() != 0x00000001 {
			return err
		}
	} else {
		// Only invoke CoUninitialize if the thread was not initialized before.
		// This will allow other go packages based on go-ole to play along
		// with this library.
		defer ole.CoUninitialize()
	}

	unknown, err := oleutil.CreateObject("WbemScripting.SWbemLocator")
	if err != nil {
		return err
	}
	defer unknown.Release()

	wmi, err := unknown.QueryInterface(ole.IID_IDispatch)
	if err != nil {
		return err
	}
	defer wmi.Release()

	// service is a SWbemServices
	serviceRaw, err := oleutil.CallMethod(wmi, "ConnectServer", connectServerArgs...)
	if err != nil {
		return err
	}
	service := serviceRaw.ToIDispatch()
	defer serviceRaw.Clear()

	// result is a SWBemObjectSet
	resultRaw, err := oleutil.CallMethod(service, "ExecQuery", query)
	if err != nil {
		return err
	}
	result := resultRaw.ToIDispatch()
	defer resultRaw.Clear()

	count, err := oleInt64(result, "Count")
	if err != nil {
		return err
	}

	// Initialize a slice with Count capacity
	dv.Set(reflect.MakeSlice(dv.Type(), 0, int(count)))

	var errFieldMismatch error
	for i := int64(0); i < count; i++ {
		err := func() error {
			// item is a SWbemObject; its concrete class is whatever the query selected
			itemRaw, err := oleutil.CallMethod(result, "ItemIndex", i)
			if err != nil {
				return err
			}
			item := itemRaw.ToIDispatch()
			defer itemRaw.Clear()

			ev := reflect.New(elemType)
			if err = c.loadEntity(ev.Interface(), item); err != nil {
				if _, ok := err.(*ErrFieldMismatch); ok {
					// We continue loading entities even in the face of field mismatch errors.
					// If we encounter any other error, that other error is returned. Otherwise,
					// an ErrFieldMismatch is returned.
					errFieldMismatch = err
				} else {
					return err
				}
			}
			if mat != multiArgTypeStructPtr {
				ev = ev.Elem()
			}
			dv.Set(reflect.Append(dv, ev))
			return nil
		}()
		if err != nil {
			return err
		}
	}
	return errFieldMismatch
}

// ErrFieldMismatch is returned when a field is to be loaded into a different
// type than the one it was stored from, or when a field is missing or
// unexported in the destination struct.
// StructType is the type of the struct pointed to by the destination argument.
type ErrFieldMismatch struct {
	StructType reflect.Type
	FieldName  string
	Reason     string
}

func (e *ErrFieldMismatch) Error() string {
	return fmt.Sprintf("wmi: cannot load field %q into a %q: %s",
		e.FieldName, e.StructType, e.Reason)
}

var timeType = reflect.TypeOf(time.Time{})

// loadEntity loads a SWbemObject into a struct pointer.
func (c *Client) loadEntity(dst interface{}, src *ole.IDispatch) (errFieldMismatch error) {
	v := reflect.ValueOf(dst).Elem()
	for i := 0; i < v.NumField(); i++ {
		f := v.Field(i)
		of := f
		isPtr := f.Kind() == reflect.Ptr
		if isPtr {
			ptr := reflect.New(f.Type().Elem())
			f.Set(ptr)
			f = f.Elem()
		}
		n := v.Type().Field(i).Name
		if !f.CanSet() {
			return &ErrFieldMismatch{
				StructType: of.Type(),
				FieldName:  n,
				Reason:     "CanSet() is false",
			}
		}
		prop, err := oleutil.GetProperty(src, n)
		if err != nil {
			if !c.AllowMissingFields {
				errFieldMismatch = &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "no such struct field",
				}
			}
			continue
		}
		defer prop.Clear()

		switch val := prop.Value().(type) {
		case int8, int16, int32, int64, int:
			v := reflect.ValueOf(val).Int()
			switch f.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				f.SetInt(v)
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				f.SetUint(uint64(v))
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not an integer class",
				}
			}
		case uint8, uint16, uint32, uint64:
			v := reflect.ValueOf(val).Uint()
			switch f.Kind() {
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				f.SetInt(int64(v))
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				f.SetUint(v)
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not an integer class",
				}
			}
		case string:
			switch f.Kind() {
			case reflect.String:
				f.SetString(val)
			case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
				iv, err := strconv.ParseInt(val, 10, 64)
				if err != nil {
					return err
				}
				f.SetInt(iv)
			case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
				uv, err := strconv.ParseUint(val, 10, 64)
				if err != nil {
					return err
				}
				f.SetUint(uv)
			case reflect.Struct:
				switch f.Type() {
				case timeType:
					if len(val) == 25 {
						mins, err := strconv.Atoi(val[22:])
						if err != nil {
							return err
						}
						val = val[:22] + fmt.Sprintf("%02d%02d", mins/60, mins%60)
					}
					t, err := time.Parse("20060102150405.000000-0700", val)
					if err != nil {
						return err
					}
					f.Set(reflect.ValueOf(t))
				}
			}
		case bool:
			switch f.Kind() {
			case reflect.Bool:
				f.SetBool(val)
			default:
				return &ErrFieldMismatch{
					StructType: of.Type(),
					FieldName:  n,
					Reason:     "not a bool",
				}
			}
		default:
			typeof := reflect.TypeOf(val)
			if typeof == nil && (isPtr || c.NonePtrZero) {
				if (isPtr && c.PtrNil) || (!isPtr && c.NonePtrZero) {
					of.Set(reflect.Zero(of.Type()))
				}
				break
			}
			return &ErrFieldMismatch{
				StructType: of.Type(),
				FieldName:  n,
				Reason:     fmt.Sprintf("unsupported type (%T)", val),
			}
		}
	}
	return errFieldMismatch
}

type multiArgType int

const (
	multiArgTypeInvalid multiArgType = iota
	multiArgTypeStruct
	multiArgTypeStructPtr
)

// checkMultiArg checks that v has type []S or []*S, for some struct type S.
//
// It returns what category the slice's elements are, and the reflect.Type
// that represents S.
func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
	if v.Kind() != reflect.Slice {
		return multiArgTypeInvalid, nil
	}
	elemType = v.Type().Elem()
	switch elemType.Kind() {
	case reflect.Struct:
		return multiArgTypeStruct, elemType
	case reflect.Ptr:
		elemType = elemType.Elem()
		if elemType.Kind() == reflect.Struct {
			return multiArgTypeStructPtr, elemType
		}
	}
	return multiArgTypeInvalid, nil
}

func oleInt64(item *ole.IDispatch, prop string) (int64, error) {
	v, err := oleutil.GetProperty(item, prop)
	if err != nil {
		return 0, err
	}
	defer v.Clear()

	i := int64(v.Val)
	return i, nil
}

// CreateQuery returns a WQL query string that queries all columns of src.
// where is an optional string that is appended to the query, to be used with
// WHERE clauses. In such a case, the "WHERE" string should appear at the
// beginning.
func CreateQuery(src interface{}, where string) string {
	var b bytes.Buffer
	b.WriteString("SELECT ")
	s := reflect.Indirect(reflect.ValueOf(src))
	t := s.Type()
	if s.Kind() == reflect.Slice {
		t = t.Elem()
	}
	if t.Kind() != reflect.Struct {
		return ""
	}
	var fields []string
	for i := 0; i < t.NumField(); i++ {
		fields = append(fields, t.Field(i).Name)
	}
	b.WriteString(strings.Join(fields, ", "))
	b.WriteString(" FROM ")
	b.WriteString(t.Name())
	b.WriteString(" " + where)
	return b.String()
}
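
// Illustrative CreateQuery usage with a WHERE clause (a sketch, not part of
// the vendored source; Win32_Service and its Name/State properties are
// standard WMI classes, and the github.com/StackExchange/wmi import path is
// assumed):
//
//	type Win32_Service struct {
//		Name  string
//		State string
//	}
//
//	var dst []Win32_Service
//	// The where argument is appended verbatim, so it must begin with "WHERE".
//	q := wmi.CreateQuery(&dst, "WHERE State = 'Running'")
//	if err := wmi.Query(q, &dst); err != nil {
//		log.Fatal(err)
//	}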
@ -0,0 +1,20 @@
The MIT License (MIT)

Copyright (c) 2013 Armon Dadgar

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -0,0 +1,191 @@
                                 Apache License
                           Version 2.0, January 2004
                        https://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   Copyright 2013-2016 Docker, Inc.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       https://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,19 @@
Docker
Copyright 2012-2016 Docker, Inc.

This product includes software developed at Docker, Inc. (https://www.docker.com).

This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.

The following is courtesy of our legal counsel:

Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.

For more information, please see https://www.bis.doc.gov

See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
@ -0,0 +1,58 @@
package mount

// Type represents the type of a mount.
type Type string

const (
	// TypeBind BIND
	TypeBind Type = "bind"
	// TypeVolume VOLUME
	TypeVolume Type = "volume"
)

// Mount represents a mount (volume).
type Mount struct {
	Type     Type   `json:",omitempty"`
	Source   string `json:",omitempty"`
	Target   string `json:",omitempty"`
	ReadOnly bool   `json:",omitempty"`

	BindOptions   *BindOptions   `json:",omitempty"`
	VolumeOptions *VolumeOptions `json:",omitempty"`
}

// Propagation represents the propagation of a mount.
type Propagation string

const (
	// PropagationRPrivate RPRIVATE
	PropagationRPrivate Propagation = "rprivate"
	// PropagationPrivate PRIVATE
	PropagationPrivate Propagation = "private"
	// PropagationRShared RSHARED
	PropagationRShared Propagation = "rshared"
	// PropagationShared SHARED
	PropagationShared Propagation = "shared"
	// PropagationRSlave RSLAVE
	PropagationRSlave Propagation = "rslave"
	// PropagationSlave SLAVE
	PropagationSlave Propagation = "slave"
)

// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
	Propagation Propagation `json:",omitempty"`
}

// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
	NoCopy       bool              `json:",omitempty"`
	Labels       map[string]string `json:",omitempty"`
	DriverConfig *Driver           `json:",omitempty"`
}

// Driver represents a volume driver.
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}
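As a rough illustration of how these vendored types compose (a sketch, not part of the diff; the import path assumes the package's usual location in the docker/docker tree, and the paths are purely illustrative):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	// A read-only bind mount with recursive-private propagation.
	m := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/var/log",
		Target:   "/mnt/log",
		ReadOnly: true,
		BindOptions: &mount.BindOptions{
			Propagation: mount.PropagationRPrivate,
		},
	}
	// Every field carries `json:",omitempty"`, so zero values are
	// dropped and the wire representation stays compact.
	b, err := json.Marshal(m)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}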
@ -0,0 +1,21 @@
package swarm

import "time"

// Version represents the internal object version.
type Version struct {
	Index uint64 `json:",omitempty"`
}

// Meta is a base object inherited by most of the other ones.
type Meta struct {
	Version   Version   `json:",omitempty"`
	CreatedAt time.Time `json:",omitempty"`
	UpdatedAt time.Time `json:",omitempty"`
}

// Annotations represents how to describe an object.
type Annotations struct {
	Name   string            `json:",omitempty"`
	Labels map[string]string `json:",omitempty"`
}
@ -0,0 +1,22 @@
package swarm

import (
	"time"

	"github.com/docker/docker/api/types/mount"
)

// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
	Image           string            `json:",omitempty"`
	Labels          map[string]string `json:",omitempty"`
	Command         []string          `json:",omitempty"`
	Args            []string          `json:",omitempty"`
	Env             []string          `json:",omitempty"`
	Dir             string            `json:",omitempty"`
	User            string            `json:",omitempty"`
	Groups          []string          `json:",omitempty"`
	TTY             bool              `json:",omitempty"`
	Mounts          []mount.Mount     `json:",omitempty"`
	StopGracePeriod *time.Duration    `json:",omitempty"`
}
@ -0,0 +1,102 @@
package swarm

// Endpoint represents an endpoint.
type Endpoint struct {
	Spec       EndpointSpec        `json:",omitempty"`
	Ports      []PortConfig        `json:",omitempty"`
	VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}

// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
	Mode  ResolutionMode `json:",omitempty"`
	Ports []PortConfig   `json:",omitempty"`
}

// ResolutionMode represents a resolution mode.
type ResolutionMode string

const (
	// ResolutionModeVIP VIP
	ResolutionModeVIP ResolutionMode = "vip"
	// ResolutionModeDNSRR DNSRR
	ResolutionModeDNSRR ResolutionMode = "dnsrr"
)

// PortConfig represents the config of a port.
type PortConfig struct {
	Name     string             `json:",omitempty"`
	Protocol PortConfigProtocol `json:",omitempty"`
	// TargetPort is the port inside the container
	TargetPort uint32 `json:",omitempty"`
	// PublishedPort is the port on the swarm hosts
	PublishedPort uint32 `json:",omitempty"`
}

// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string

const (
	// TODO(stevvooe): These should be used generally, not just for PortConfig.

	// PortConfigProtocolTCP TCP
	PortConfigProtocolTCP PortConfigProtocol = "tcp"
	// PortConfigProtocolUDP UDP
	PortConfigProtocolUDP PortConfigProtocol = "udp"
)

// EndpointVirtualIP represents the virtual ip of a port.
type EndpointVirtualIP struct {
	NetworkID string `json:",omitempty"`
	Addr      string `json:",omitempty"`
}

// Network represents a network.
type Network struct {
	ID string
	Meta
	Spec        NetworkSpec  `json:",omitempty"`
	DriverState Driver       `json:",omitempty"`
	IPAMOptions *IPAMOptions `json:",omitempty"`
}

// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
	Annotations
	DriverConfiguration *Driver      `json:",omitempty"`
	IPv6Enabled         bool         `json:",omitempty"`
	Internal            bool         `json:",omitempty"`
	Attachable          bool         `json:",omitempty"`
	IPAMOptions         *IPAMOptions `json:",omitempty"`
}

// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
	Target  string   `json:",omitempty"`
	Aliases []string `json:",omitempty"`
}

// NetworkAttachment represents a network attachment.
type NetworkAttachment struct {
	Network   Network  `json:",omitempty"`
	Addresses []string `json:",omitempty"`
}

// IPAMOptions represents ipam options.
type IPAMOptions struct {
	Driver  Driver       `json:",omitempty"`
	Configs []IPAMConfig `json:",omitempty"`
}

// IPAMConfig represents ipam configuration.
type IPAMConfig struct {
	Subnet  string `json:",omitempty"`
	Range   string `json:",omitempty"`
	Gateway string `json:",omitempty"`
}

// Driver represents a network driver.
type Driver struct {
	Name    string            `json:",omitempty"`
	Options map[string]string `json:",omitempty"`
}
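A minimal sketch of how these network types nest (not part of the diff; the network name, driver, and CIDR values are made up for illustration):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	// An attachable overlay network with one static IPAM pool.
	spec := swarm.NetworkSpec{
		Annotations:         swarm.Annotations{Name: "app-net"},
		DriverConfiguration: &swarm.Driver{Name: "overlay"},
		Attachable:          true,
		IPAMOptions: &swarm.IPAMOptions{
			Driver: swarm.Driver{Name: "default"},
			Configs: []swarm.IPAMConfig{
				{Subnet: "10.10.0.0/24", Gateway: "10.10.0.1"},
			},
		},
	}
	fmt.Printf("%+v\n", spec)
}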
@ -0,0 +1,113 @@
package swarm

// Node represents a node.
type Node struct {
	ID string
	Meta
	// Spec defines the desired state of the node as specified by the user.
	// The system will honor this and will *never* modify it.
	Spec NodeSpec `json:",omitempty"`
	// Description encapsulates the properties of the Node as reported by the
	// agent.
	Description NodeDescription `json:",omitempty"`
	// Status provides the current status of the node, as seen by the manager.
	Status NodeStatus `json:",omitempty"`
	// ManagerStatus provides the current status of the node's manager
	// component, if the node is a manager.
	ManagerStatus *ManagerStatus `json:",omitempty"`
}

// NodeSpec represents the spec of a node.
type NodeSpec struct {
	Annotations
	Role         NodeRole         `json:",omitempty"`
	Availability NodeAvailability `json:",omitempty"`
}

// NodeRole represents the role of a node.
type NodeRole string

const (
	// NodeRoleWorker WORKER
	NodeRoleWorker NodeRole = "worker"
	// NodeRoleManager MANAGER
	NodeRoleManager NodeRole = "manager"
)

// NodeAvailability represents the availability of a node.
type NodeAvailability string

const (
	// NodeAvailabilityActive ACTIVE
	NodeAvailabilityActive NodeAvailability = "active"
	// NodeAvailabilityPause PAUSE
	NodeAvailabilityPause NodeAvailability = "pause"
	// NodeAvailabilityDrain DRAIN
	NodeAvailabilityDrain NodeAvailability = "drain"
)

// NodeDescription represents the description of a node.
type NodeDescription struct {
	Hostname  string            `json:",omitempty"`
	Platform  Platform          `json:",omitempty"`
	Resources Resources         `json:",omitempty"`
	Engine    EngineDescription `json:",omitempty"`
}

// Platform represents the platform (Arch/OS).
type Platform struct {
	Architecture string `json:",omitempty"`
	OS           string `json:",omitempty"`
}

// EngineDescription represents the description of an engine.
type EngineDescription struct {
	EngineVersion string              `json:",omitempty"`
	Labels        map[string]string   `json:",omitempty"`
	Plugins       []PluginDescription `json:",omitempty"`
}

// PluginDescription represents the description of an engine plugin.
type PluginDescription struct {
	Type string `json:",omitempty"`
	Name string `json:",omitempty"`
}

// NodeStatus represents the status of a node.
type NodeStatus struct {
	State   NodeState `json:",omitempty"`
	Message string    `json:",omitempty"`
}

// Reachability represents the reachability of a node.
type Reachability string

const (
	// ReachabilityUnknown UNKNOWN
	ReachabilityUnknown Reachability = "unknown"
	// ReachabilityUnreachable UNREACHABLE
	ReachabilityUnreachable Reachability = "unreachable"
	// ReachabilityReachable REACHABLE
	ReachabilityReachable Reachability = "reachable"
)

// ManagerStatus represents the status of a manager.
type ManagerStatus struct {
	Leader       bool         `json:",omitempty"`
	Reachability Reachability `json:",omitempty"`
	Addr         string       `json:",omitempty"`
}

// NodeState represents the state of a node.
type NodeState string

const (
	// NodeStateUnknown UNKNOWN
	NodeStateUnknown NodeState = "unknown"
	// NodeStateDown DOWN
	NodeStateDown NodeState = "down"
	// NodeStateReady READY
	NodeStateReady NodeState = "ready"
	// NodeStateDisconnected DISCONNECTED
	NodeStateDisconnected NodeState = "disconnected"
)
@ -0,0 +1,105 @@
package swarm

import "time"

// Service represents a service.
type Service struct {
	ID string
	Meta
	Spec         ServiceSpec  `json:",omitempty"`
	PreviousSpec *ServiceSpec `json:",omitempty"`
	Endpoint     Endpoint     `json:",omitempty"`
	UpdateStatus UpdateStatus `json:",omitempty"`
}

// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
	Annotations

	// TaskTemplate defines how the service should construct new tasks when
	// orchestrating this service.
	TaskTemplate TaskSpec      `json:",omitempty"`
	Mode         ServiceMode   `json:",omitempty"`
	UpdateConfig *UpdateConfig `json:",omitempty"`

	// Networks field in ServiceSpec is deprecated. The
	// same field in TaskSpec should be used instead.
	// This field will be removed in a future release.
	Networks     []NetworkAttachmentConfig `json:",omitempty"`
	EndpointSpec *EndpointSpec             `json:",omitempty"`
}

// ServiceMode represents the mode of a service.
type ServiceMode struct {
	Replicated *ReplicatedService `json:",omitempty"`
	Global     *GlobalService     `json:",omitempty"`
}

// UpdateState is the state of a service update.
type UpdateState string

const (
	// UpdateStateUpdating is the updating state.
	UpdateStateUpdating UpdateState = "updating"
	// UpdateStatePaused is the paused state.
	UpdateStatePaused UpdateState = "paused"
	// UpdateStateCompleted is the completed state.
	UpdateStateCompleted UpdateState = "completed"
)

// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
	State       UpdateState `json:",omitempty"`
	StartedAt   time.Time   `json:",omitempty"`
	CompletedAt time.Time   `json:",omitempty"`
	Message     string      `json:",omitempty"`
}

// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
	Replicas *uint64 `json:",omitempty"`
}

// GlobalService is a kind of ServiceMode.
type GlobalService struct{}

const (
	// UpdateFailureActionPause PAUSE
	UpdateFailureActionPause = "pause"
	// UpdateFailureActionContinue CONTINUE
	UpdateFailureActionContinue = "continue"
)

// UpdateConfig represents the update configuration.
type UpdateConfig struct {
	// Maximum number of tasks to be updated in one iteration.
	// 0 means unlimited parallelism.
	Parallelism uint64 `json:",omitempty"`

	// Amount of time between updates.
	Delay time.Duration `json:",omitempty"`

	// FailureAction is the action to take when an update fails.
	FailureAction string `json:",omitempty"`

	// Monitor indicates how long to monitor a task for failure after it is
	// created. If the task fails by ending up in one of the states
	// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
	// this counts as a failure. If it fails after Monitor, it does not
	// count as a failure. If Monitor is unspecified, a default value will
	// be used.
	Monitor time.Duration `json:",omitempty"`

	// MaxFailureRatio is the fraction of tasks that may fail during
	// an update before the failure action is invoked. Any task created by
	// the current update which ends up in one of the states REJECTED,
	// COMPLETED or FAILED within Monitor from its creation counts as a
	// failure. The number of failures is divided by the number of tasks
	// being updated, and if this fraction is greater than
	// MaxFailureRatio, the failure action is invoked.
	//
	// If the failure action is CONTINUE, there is no effect.
	// If the failure action is PAUSE, no more tasks will be updated until
	// another update is started.
	MaxFailureRatio float32
}
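A sketch of how ServiceSpec, ServiceMode, and UpdateConfig fit together (not part of the diff; TaskSpec and ContainerSpec come from the sibling files in this package, and the image, env var, and timing values are made up):

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)

	// A replicated service whose rolling updates proceed one task at a
	// time and pause once more than 10% of the updated tasks fail
	// within the Monitor window.
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{
				Image: "nginx:alpine",
				Env:   []string{"TZ=UTC"},
			},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     1,
			Delay:           10 * time.Second,
			Monitor:         30 * time.Second,
			FailureAction:   swarm.UpdateFailureActionPause,
			MaxFailureRatio: 0.1,
		},
	}
	fmt.Printf("%+v\n", spec)
}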
@ -0,0 +1,178 @@
package swarm

import "time"

// ClusterInfo represents info about the cluster for outputting in "info".
// It contains the same information as "Swarm", but without the JoinTokens.
type ClusterInfo struct {
	ID string
	Meta
	Spec Spec
}

// Swarm represents a swarm.
type Swarm struct {
	ClusterInfo
	JoinTokens JoinTokens
}

// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
	// Worker is the join token workers may use to join the swarm.
	Worker string
	// Manager is the join token managers may use to join the swarm.
	Manager string
}

// Spec represents the spec of a swarm.
type Spec struct {
	Annotations

	Orchestration OrchestrationConfig `json:",omitempty"`
	Raft          RaftConfig          `json:",omitempty"`
	Dispatcher    DispatcherConfig    `json:",omitempty"`
	CAConfig      CAConfig            `json:",omitempty"`
	TaskDefaults  TaskDefaults        `json:",omitempty"`
}

// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
	// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
	// node. If negative, never remove completed or failed tasks.
	TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}

// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
	// LogDriver selects the log driver to use for tasks created in the
	// orchestrator if unspecified by a service.
	//
	// Updating this value will only have an effect on new tasks. Old tasks
	// will continue to use their previously configured log driver until
	// recreated.
	LogDriver *Driver `json:",omitempty"`
}

// RaftConfig represents raft configuration.
type RaftConfig struct {
	// SnapshotInterval is the number of log entries between snapshots.
	SnapshotInterval uint64 `json:",omitempty"`

	// KeepOldSnapshots is the number of snapshots to keep beyond the
	// current snapshot.
	KeepOldSnapshots uint64 `json:",omitempty"`

	// LogEntriesForSlowFollowers is the number of log entries to keep
	// around to sync up slow followers after a snapshot is created.
	LogEntriesForSlowFollowers uint64 `json:",omitempty"`

	// ElectionTick is the number of ticks that a follower will wait for a message
	// from the leader before becoming a candidate and starting an election.
	// ElectionTick must be greater than HeartbeatTick.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	ElectionTick int

	// HeartbeatTick is the number of ticks between heartbeats. Every
	// HeartbeatTick ticks, the leader will send a heartbeat to the
	// followers.
	//
	// A tick currently defaults to one second, so these translate directly to
	// seconds currently, but this is NOT guaranteed.
	HeartbeatTick int
}

// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
	// HeartbeatPeriod defines how often an agent should send heartbeats to
	// the dispatcher.
	HeartbeatPeriod time.Duration `json:",omitempty"`
}

// CAConfig represents CA configuration.
type CAConfig struct {
	// NodeCertExpiry is the duration certificates should be issued for.
	NodeCertExpiry time.Duration `json:",omitempty"`

	// ExternalCAs is a list of CAs to which a manager node will make
	// certificate signing requests for node certificates.
	ExternalCAs []*ExternalCA `json:",omitempty"`
}

// ExternalCAProtocol represents the type of an external CA.
type ExternalCAProtocol string

// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"

// ExternalCA defines an external CA to be used by the cluster.
type ExternalCA struct {
	// Protocol is the protocol used by this external CA.
	Protocol ExternalCAProtocol

	// URL is the URL where the external CA can be reached.
	URL string

	// Options is a set of additional key/value pairs whose interpretation
	// depends on the specified CA type.
	Options map[string]string `json:",omitempty"`
}

// InitRequest is the request used to init a swarm.
type InitRequest struct {
	ListenAddr      string
	AdvertiseAddr   string
	ForceNewCluster bool
	Spec            Spec
}

// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
	ListenAddr    string
	AdvertiseAddr string
	RemoteAddrs   []string
	JoinToken     string // accept by secret
}

// LocalNodeState represents the state of the local node.
type LocalNodeState string

const (
	// LocalNodeStateInactive INACTIVE
	LocalNodeStateInactive LocalNodeState = "inactive"
	// LocalNodeStatePending PENDING
	LocalNodeStatePending LocalNodeState = "pending"
	// LocalNodeStateActive ACTIVE
	LocalNodeStateActive LocalNodeState = "active"
	// LocalNodeStateError ERROR
	LocalNodeStateError LocalNodeState = "error"
)

// Info represents generic information about the swarm.
type Info struct {
	NodeID   string
	NodeAddr string

	LocalNodeState   LocalNodeState
	ControlAvailable bool
	Error            string

	RemoteManagers []Peer
	Nodes          int
	Managers       int

	Cluster ClusterInfo
}

// Peer represents a peer.
type Peer struct {
	NodeID string
	Addr   string
}

// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
	RotateWorkerToken  bool
	RotateManagerToken bool
}
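A sketch of populating InitRequest to bootstrap a cluster (not part of the diff; the addresses and Raft numbers are illustrative, with ElectionTick kept above HeartbeatTick as the field docs require):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	req := swarm.InitRequest{
		ListenAddr:    "0.0.0.0:2377",
		AdvertiseAddr: "192.0.2.10:2377",
		Spec: swarm.Spec{
			Annotations: swarm.Annotations{Name: "default"},
			Raft: swarm.RaftConfig{
				SnapshotInterval: 10000,
				HeartbeatTick:    1,
				ElectionTick:     10, // must exceed HeartbeatTick
			},
		},
	}
	fmt.Printf("%+v\n", req)
}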
@ -0,0 +1,121 @@
package swarm

import "time"

// TaskState represents the state of a task.
type TaskState string

const (
	// TaskStateNew NEW
	TaskStateNew TaskState = "new"
	// TaskStateAllocated ALLOCATED
	TaskStateAllocated TaskState = "allocated"
	// TaskStatePending PENDING
	TaskStatePending TaskState = "pending"
	// TaskStateAssigned ASSIGNED
	TaskStateAssigned TaskState = "assigned"
	// TaskStateAccepted ACCEPTED
	TaskStateAccepted TaskState = "accepted"
	// TaskStatePreparing PREPARING
	TaskStatePreparing TaskState = "preparing"
	// TaskStateReady READY
	TaskStateReady TaskState = "ready"
	// TaskStateStarting STARTING
	TaskStateStarting TaskState = "starting"
	// TaskStateRunning RUNNING
	TaskStateRunning TaskState = "running"
	// TaskStateComplete COMPLETE
	TaskStateComplete TaskState = "complete"
	// TaskStateShutdown SHUTDOWN
	TaskStateShutdown TaskState = "shutdown"
	// TaskStateFailed FAILED
	TaskStateFailed TaskState = "failed"
	// TaskStateRejected REJECTED
	TaskStateRejected TaskState = "rejected"
)

// Task represents a task.
type Task struct {
	ID string
	Meta
	Annotations

	Spec                TaskSpec            `json:",omitempty"`
	ServiceID           string              `json:",omitempty"`
	Slot                int                 `json:",omitempty"`
	NodeID              string              `json:",omitempty"`
	Status              TaskStatus          `json:",omitempty"`
	DesiredState        TaskState           `json:",omitempty"`
	NetworksAttachments []NetworkAttachment `json:",omitempty"`
}

// TaskSpec represents the spec of a task.
type TaskSpec struct {
	ContainerSpec ContainerSpec             `json:",omitempty"`
	Resources     *ResourceRequirements     `json:",omitempty"`
	RestartPolicy *RestartPolicy            `json:",omitempty"`
	Placement     *Placement                `json:",omitempty"`
	Networks      []NetworkAttachmentConfig `json:",omitempty"`

	// LogDriver specifies the LogDriver to use for tasks created from this
	// spec. If not present, the cluster default on swarm.Spec will be
	// used, finally falling back to the engine default if not specified.
	LogDriver *Driver `json:",omitempty"`

	// ForceUpdate is a counter that triggers an update even if no relevant
	// parameters have been changed.
	ForceUpdate uint64
}

// Resources represents resources (CPU/Memory).
type Resources struct {
	NanoCPUs    int64 `json:",omitempty"`
	MemoryBytes int64 `json:",omitempty"`
}

// ResourceRequirements represents resource requirements.
type ResourceRequirements struct {
	Limits       *Resources `json:",omitempty"`
	Reservations *Resources `json:",omitempty"`
}

// Placement represents orchestration parameters.
type Placement struct {
	Constraints []string `json:",omitempty"`
}

// RestartPolicy represents the restart policy.
type RestartPolicy struct {
	Condition   RestartPolicyCondition `json:",omitempty"`
	Delay       *time.Duration         `json:",omitempty"`
	MaxAttempts *uint64                `json:",omitempty"`
	Window      *time.Duration         `json:",omitempty"`
}

// RestartPolicyCondition represents when to restart.
type RestartPolicyCondition string

const (
	// RestartPolicyConditionNone NONE
	RestartPolicyConditionNone RestartPolicyCondition = "none"
	// RestartPolicyConditionOnFailure ON_FAILURE
	RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
	// RestartPolicyConditionAny ANY
	RestartPolicyConditionAny RestartPolicyCondition = "any"
)

// TaskStatus represents the status of a task.
type TaskStatus struct {
	Timestamp       time.Time       `json:",omitempty"`
	State           TaskState       `json:",omitempty"`
	Message         string          `json:",omitempty"`
	Err             string          `json:",omitempty"`
	ContainerStatus ContainerStatus `json:",omitempty"`
}

// ContainerStatus represents the status of a container.
type ContainerStatus struct {
	ContainerID string `json:",omitempty"`
	PID         int    `json:",omitempty"`
	ExitCode    int    `json:",omitempty"`
}
@ -0,0 +1,148 @@
package opts

import (
	"fmt"
	"net"
	"net/url"
	"strconv"
	"strings"
)

var (
	// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
	// These are the IANA registered port numbers for use with Docker
	// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
	DefaultHTTPPort = 2375 // Default HTTP Port
	// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
	DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
	// DefaultUnixSocket Path for the unix socket.
	// Docker daemon by default always listens on the default unix socket
	DefaultUnixSocket = "/var/run/docker.sock"
	// DefaultTCPHost constant defines the default host string used by docker on Windows
	DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
	// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
	DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
	// DefaultNamedPipe defines the default named pipe used by docker on Windows
	DefaultNamedPipe = `//./pipe/docker_engine`
)

// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
	host := strings.TrimSpace(val)
	// The empty string means default and is not handled by parseDockerDaemonHost
	if host != "" {
		_, err := parseDockerDaemonHost(host)
		if err != nil {
			return val, err
		}
	}
	// Note: unlike most flag validators, we don't return the mutated value here;
	// we need to know what the user entered later (using ParseHost) to adjust for TLS.
	return val, nil
}

// ParseHost parses and sets defaults for a daemon host string.
func ParseHost(defaultToTLS bool, val string) (string, error) {
	host := strings.TrimSpace(val)
	if host == "" {
		if defaultToTLS {
			host = DefaultTLSHost
		} else {
			host = DefaultHost
		}
	} else {
		var err error
		host, err = parseDockerDaemonHost(host)
		if err != nil {
			return val, err
		}
	}
	return host, nil
}

// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
func parseDockerDaemonHost(addr string) (string, error) {
	addrParts := strings.Split(addr, "://")
	if len(addrParts) == 1 && addrParts[0] != "" {
		addrParts = []string{"tcp", addrParts[0]}
	}

	switch addrParts[0] {
	case "tcp":
		return parseTCPAddr(addrParts[1], DefaultTCPHost)
	case "unix":
		return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
	case "npipe":
		return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
	case "fd":
		return addr, nil
	default:
		return "", fmt.Errorf("Invalid bind address format: %s", addr)
	}
}

// parseSimpleProtoAddr parses and validates that the specified address is a valid
// socket address for simple protocols like unix and npipe. It returns a formatted
// socket address, either using the address parsed from addr, or the contents of
// defaultAddr if addr is a blank string.
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
	addr = strings.TrimPrefix(addr, proto+"://")
	if strings.Contains(addr, "://") {
		return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
	}
	if addr == "" {
		addr = defaultAddr
	}
	return fmt.Sprintf("%s://%s", proto, addr), nil
}

// parseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d.
// defaultAddr must be in the full `tcp://host:port` form.
func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
	if tryAddr == "" || tryAddr == "tcp://" {
		return defaultAddr, nil
	}
	addr := strings.TrimPrefix(tryAddr, "tcp://")
	if strings.Contains(addr, "://") || addr == "" {
		return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
	}

	defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
	defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
	if err != nil {
		return "", err
	}
	// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
	// not 1.4. See https://github.com/golang/go/issues/12200 and
	// https://github.com/golang/go/issues/6530.
	if strings.HasSuffix(addr, "]:") {
		addr += defaultPort
	}

	u, err := url.Parse("tcp://" + addr)
	if err != nil {
		return "", err
	}

	host, port, err := net.SplitHostPort(u.Host)
	if err != nil {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	if host == "" {
		host = defaultHost
	}
	if port == "" {
		port = defaultPort
	}
	p, err := strconv.Atoi(port)
	if err != nil && p == 0 {
		return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
	}

	return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
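A sketch of the normalization ParseHost performs, assuming a non-Windows host (so DefaultHost is the unix socket and DefaultHTTPHost is "localhost", per the companion files below); the expected outputs in the comments follow from tracing the code above and are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	for _, in := range []string{"", "tcp://", ":2376", "tcp://10.0.0.5:4243", "unix://"} {
		out, err := opts.ParseHost(false, in)
		fmt.Printf("%-22q -> %q (err: %v)\n", in, out, err)
	}
	// ""                    -> "unix:///var/run/docker.sock"
	// "tcp://"              -> "tcp://localhost:2375"
	// ":2376"               -> "tcp://localhost:2376"
	// "tcp://10.0.0.5:4243" -> "tcp://10.0.0.5:4243"
	// "unix://"             -> "unix:///var/run/docker.sock"
}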
@ -0,0 +1,8 @@
// +build !windows

package opts

import "fmt"

// DefaultHost constant defines the default host string used by docker on non-Windows hosts
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
@ -0,0 +1,6 @@
// +build windows

package opts

// DefaultHost constant defines the default host string used by docker on Windows
var DefaultHost = "npipe://" + DefaultNamedPipe
@ -0,0 +1,42 @@
package opts

import (
	"fmt"
	"net"
)

// IPOpt holds an IP. It is used to store values from CLI flags.
type IPOpt struct {
	*net.IP
}

// NewIPOpt creates a new IPOpt from a reference net.IP and a
// string representation of an IP. If the string is not a valid
// IP it will fall back to the specified reference.
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
	o := &IPOpt{
		IP: ref,
	}
	o.Set(defaultVal)
	return o
}

// Set sets an IPv4 or IPv6 address from a given string. If the given
// string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error {
	ip := net.ParseIP(val)
	if ip == nil {
		return fmt.Errorf("%s is not an ip address", val)
	}
	*o.IP = ip
	return nil
}

// String returns the IP address stored in the IPOpt. If the stored IP is a
// nil pointer, it returns an empty string.
func (o *IPOpt) String() string {
	if *o.IP == nil {
		return ""
	}
	return o.IP.String()
}
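IPOpt implements Set and String, so it satisfies the standard library's flag.Value interface. A minimal sketch of wiring it up (not part of the diff; the flag name and addresses are made up):

package main

import (
	"flag"
	"fmt"
	"net"

	"github.com/docker/docker/opts"
)

func main() {
	var ip net.IP
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	// NewIPOpt seeds ip with 127.0.0.1; an invalid default would simply
	// leave the reference untouched, because Set's error is ignored there.
	fs.Var(opts.NewIPOpt(&ip, "127.0.0.1"), "ip", "address to bind to")

	fs.Parse([]string{"-ip", "10.0.0.7"})
	fmt.Println(ip) // 10.0.0.7
}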
@ -0,0 +1,274 @@
package opts

import (
	"fmt"
	"net"
	"regexp"
	"strings"
)

var (
	alphaRegexp  = regexp.MustCompile(`[a-zA-Z]`)
	domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)

// ListOpts holds a list of values and a validation function.
type ListOpts struct {
	values    *[]string
	validator ValidatorFctType
}

// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
	var values []string
	return *NewListOptsRef(&values, validator)
}

// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
	return &ListOpts{
		values:    values,
		validator: validator,
	}
}

func (opts *ListOpts) String() string {
	return fmt.Sprintf("%v", []string(*opts.values))
}

// Set validates the input value if a validator is set and adds it to the
// internal slice.
func (opts *ListOpts) Set(value string) error {
	if opts.validator != nil {
		v, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = v
	}
	*opts.values = append(*opts.values, value)
	return nil
}

// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
	for i, k := range *opts.values {
		if k == key {
			*opts.values = append((*opts.values)[:i], (*opts.values)[i+1:]...)
			return
		}
	}
}

// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
	ret := make(map[string]struct{})
	for _, k := range *opts.values {
		ret[k] = struct{}{}
	}
	return ret
}

// GetAll returns the values of the slice.
func (opts *ListOpts) GetAll() []string {
	return *opts.values
}

// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
	v := *opts.values
	if v == nil {
		return make([]string, 0)
	}
	return v
}

// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
	for _, k := range *opts.values {
		if k == key {
			return true
		}
	}
	return false
}

// Len returns the number of elements in the slice.
func (opts *ListOpts) Len() int {
	return len(*opts.values)
}

// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
	Name() string
}

// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep a reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
	name string
	ListOpts
}

var _ NamedOption = &NamedListOpts{}

// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
	return &NamedListOpts{
		name:     name,
		ListOpts: *NewListOptsRef(values, validator),
	}
}

// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
	return o.name
}

// MapOpts holds a map of values and a validation function.
type MapOpts struct {
	values    map[string]string
	validator ValidatorFctType
}

// Set validates the input value if a validator is set and adds it to the
// internal map, splitting on '='.
func (opts *MapOpts) Set(value string) error {
	if opts.validator != nil {
		v, err := opts.validator(value)
		if err != nil {
			return err
		}
		value = v
	}
	vals := strings.SplitN(value, "=", 2)
	if len(vals) == 1 {
		opts.values[vals[0]] = ""
	} else {
		opts.values[vals[0]] = vals[1]
	}
	return nil
}

// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
	return opts.values
}

func (opts *MapOpts) String() string {
	return fmt.Sprintf("%v", map[string]string(opts.values))
}

// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
	if values == nil {
		values = make(map[string]string)
	}
	return &MapOpts{
		values:    values,
		validator: validator,
	}
}

// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep a reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
	name string
	MapOpts
}

var _ NamedOption = &NamedMapOpts{}

// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
	return &NamedMapOpts{
		name:    name,
		MapOpts: *NewMapOpts(values, validator),
	}
}

// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
	return o.name
}

// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)

// ValidatorFctListType defines a validator function that returns a validated list of strings and/or an error.
type ValidatorFctListType func(val string) ([]string, error)

// ValidateIPAddress validates an IP address.
func ValidateIPAddress(val string) (string, error) {
	var ip = net.ParseIP(strings.TrimSpace(val))
	if ip != nil {
		return ip.String(), nil
	}
	return "", fmt.Errorf("%s is not an ip address", val)
}

// ValidateDNSSearch validates a domain for the resolvconf search configuration.
// A zero-length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
	if val = strings.Trim(val, " "); val == "." {
		return val, nil
	}
	return validateDomain(val)
}

func validateDomain(val string) (string, error) {
	if alphaRegexp.FindString(val) == "" {
		return "", fmt.Errorf("%s is not a valid domain", val)
	}
	ns := domainRegexp.FindSubmatch([]byte(val))
	if len(ns) > 0 && len(ns[1]) < 255 {
		return string(ns[1]), nil
	}
	return "", fmt.Errorf("%s is not a valid domain", val)
}

// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form key=value.
func ValidateLabel(val string) (string, error) {
	if strings.Count(val, "=") < 1 {
		return "", fmt.Errorf("bad attribute format: %s", val)
	}
	return val, nil
}

// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
	validSysctlMap := map[string]bool{
		"kernel.msgmax":          true,
		"kernel.msgmnb":          true,
		"kernel.msgmni":          true,
		"kernel.sem":             true,
		"kernel.shmall":          true,
		"kernel.shmmax":          true,
		"kernel.shmmni":          true,
		"kernel.shm_rmid_forced": true,
	}
	validSysctlPrefixes := []string{
		"net.",
		"fs.mqueue.",
	}
	arr := strings.Split(val, "=")
	if len(arr) < 2 {
		return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
	}
	if validSysctlMap[arr[0]] {
		return val, nil
	}

	for _, vp := range validSysctlPrefixes {
		if strings.HasPrefix(arr[0], vp) {
			return val, nil
		}
	}
	return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
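ListOpts also satisfies flag.Value through its pointer receiver, which is how these option types plug into CLI parsing. A sketch of pairing it with ValidateLabel (not part of the diff; the flag name and label values are made up):

package main

import (
	"flag"
	"fmt"

	"github.com/docker/docker/opts"
)

func main() {
	// Each -label flag is passed through ValidateLabel before being
	// appended, so malformed values (missing '=') fail at parse time.
	labels := opts.NewListOpts(opts.ValidateLabel)
	fs := flag.NewFlagSet("demo", flag.ContinueOnError)
	fs.Var(&labels, "label", "set metadata (key=value)")

	if err := fs.Parse([]string{"-label", "env=prod", "-label", "team=infra"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println(labels.GetAll()) // [env=prod team=infra]
}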
@ -0,0 +1,6 @@
// +build !windows

package opts

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
const DefaultHTTPHost = "localhost"
@ -0,0 +1,56 @@
// +build windows

package opts

// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP5.
//
// What was found is that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
// cache is flushed (ipconfig /flushdns), then the client will pause for
// exactly one second when connecting to the daemon for calls. For example
// using docker run windowsservercore cmd, the CLI will send a create followed
// by an attach. You see the delay between the attach finishing and the attach
// being seen by the daemon.
//
// Here's some daemon debug logs with additional debug spew put in. The
// AfterWriteJSON log is the very last thing the daemon does as part of the
// create call. The POST /attach is the second CLI call. Notice the second
// time gap.
//
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
// ... 1 second gap here....
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
//
// We have validated that working around this using the actual IPv4 localhost
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.

// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"
@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.

(File diff suppressed because it is too large.)
@@ -0,0 +1,112 @@
// +build !windows

package archive

import (
	"archive/tar"
	"errors"
	"os"
	"path/filepath"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return srcPath
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
	return srcPath + string(filepath.Separator) + include
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	return p, nil // already unix-style
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		err = errors.New("cannot convert stat value to syscall.Stat_t")
		return
	}

	inode = uint64(s.Ino)

	// Currently go does not fill in the major/minors
	if s.Mode&syscall.S_IFBLK != 0 ||
		s.Mode&syscall.S_IFCHR != 0 {
		hdr.Devmajor = int64(major(uint64(s.Rdev)))
		hdr.Devminor = int64(minor(uint64(s.Rdev)))
	}

	return
}

func getFileUIDGID(stat interface{}) (int, int, error) {
	s, ok := stat.(*syscall.Stat_t)

	if !ok {
		return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
	}
	return int(s.Uid), int(s.Gid), nil
}

func major(device uint64) uint64 {
	return (device >> 8) & 0xfff
}

func minor(device uint64) uint64 {
	return (device & 0xff) | ((device >> 12) & 0xfff00)
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	mode := uint32(hdr.Mode & 07777)
	switch hdr.Typeflag {
	case tar.TypeBlock:
		mode |= syscall.S_IFBLK
	case tar.TypeChar:
		mode |= syscall.S_IFCHR
	case tar.TypeFifo:
		mode |= syscall.S_IFIFO
	}

	if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
		return err
	}
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	if hdr.Typeflag == tar.TypeLink {
		if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
			if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
				return err
			}
		}
	} else if hdr.Typeflag != tar.TypeSymlink {
		if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
			return err
		}
	}
	return nil
}
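
A minimal standalone sketch (not in the diff) of the device-number layout that
the major and minor helpers above assume: Linux packs the minor's low byte in
bits 0-7, the major in bits 8-19, and the rest of the minor from bit 20 up.
The mkdev here is an illustrative inverse; in the vendored code, system.Mkdev
plays that role when handleTarTypeBlockCharFifo recreates device nodes.

package main

import "fmt"

func major(device uint64) uint64 { return (device >> 8) & 0xfff }
func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

// mkdev packs a (major, minor) pair back into a device number.
func mkdev(maj, min uint64) uint64 {
	return (min & 0xff) | ((maj & 0xfff) << 8) | ((min &^ 0xff) << 12)
}

func main() {
	dev := mkdev(8, 1)                  // e.g. /dev/sda1 on Linux
	fmt.Println(major(dev), minor(dev)) // 8 1
}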
@@ -0,0 +1,70 @@
// +build windows

package archive

import (
	"archive/tar"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/longpath"
)

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
	return longpath.AddPrefix(srcPath)
}

// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
	return filepath.Join(srcPath, include)
}

// CanonicalTarNameForPath returns platform-specific filepath
// to canonical posix-style path for tar archival. p is relative
// path.
func CanonicalTarNameForPath(p string) (string, error) {
	// windows: convert windows style relative path with backslashes
	// into forward slashes. Since windows does not allow '/' or '\'
	// in file names, it is mostly safe to replace however we must
	// check just in case
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}

// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0755
	// Add the x bit: make everything +x from windows
	perm |= 0111

	return perm
}

func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
	// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
	return
}

// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
	return nil
}

func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
	return nil
}

func getFileUIDGID(stat interface{}) (int, int, error) {
	// no notion of file ownership mapping yet on Windows
	return 0, 0, nil
}
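
To make the Windows permission mapping above concrete, here is a small
self-contained sketch (the chmodTarEntry body is copied from the diff) showing
what a few common modes become after archiving on Windows:

package main

import (
	"fmt"
	"os"
)

// chmodTarEntry clamps permissions to 0755 and then forces the execute bits,
// so entries archived on Windows stay usable when extracted on Unix.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0755
	perm |= 0111
	return perm
}

func main() {
	for _, p := range []os.FileMode{0666, 0644, 0600} {
		fmt.Printf("%04o -> %04o\n", p, chmodTarEntry(p)) // 0666/0644 -> 0755, 0600 -> 0711
	}
}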
@@ -0,0 +1,416 @@
package archive

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"syscall"
	"time"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
)

// ChangeType represents the change type.
type ChangeType int

const (
	// ChangeModify represents the modify operation.
	ChangeModify = iota
	// ChangeAdd represents the add operation.
	ChangeAdd
	// ChangeDelete represents the delete operation.
	ChangeDelete
)

func (c ChangeType) String() string {
	switch c {
	case ChangeModify:
		return "C"
	case ChangeAdd:
		return "A"
	case ChangeDelete:
		return "D"
	}
	return ""
}

// Change represents a change; it wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be modify, add, delete.
// This is used for layer diff.
type Change struct {
	Path string
	Kind ChangeType
}

func (change *Change) String() string {
	return fmt.Sprintf("%s %s", change.Kind, change.Path)
}

// for sort.Sort
type changesByPath []Change

func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int           { return len(c) }
func (c changesByPath) Swap(i, j int)      { c[j], c[i] = c[i], c[j] }

// Gnu tar and the go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files; we handle this by comparing for exact times, *or* the same
// second count and either a or b having exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
	return a == b ||
		(a.Unix() == b.Unix() &&
			(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}

func sameFsTimeSpec(a, b syscall.Timespec) bool {
	return a.Sec == b.Sec &&
		(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}

// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
	var (
		changes     []Change
		changedDirs = make(map[string]struct{})
	)

	err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		path, err = filepath.Rel(rw, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		path = filepath.Join(string(os.PathSeparator), path)

		// Skip root
		if path == string(os.PathSeparator) {
			return nil
		}

		// Skip AUFS metadata
		if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
			return err
		}

		change := Change{
			Path: path,
		}

		// Find out what kind of modification happened
		file := filepath.Base(path)
		// If there is a whiteout, then the file was removed
		if strings.HasPrefix(file, WhiteoutPrefix) {
			originalFile := file[len(WhiteoutPrefix):]
			change.Path = filepath.Join(filepath.Dir(path), originalFile)
			change.Kind = ChangeDelete
		} else {
			// Otherwise, the file was added
			change.Kind = ChangeAdd

			// ...Unless it already existed in a top layer, in which case, it's a modification
			for _, layer := range layers {
				stat, err := os.Stat(filepath.Join(layer, path))
				if err != nil && !os.IsNotExist(err) {
					return err
				}
				if err == nil {
					// The file existed in the top layer, so that's a modification

					// However, if it's a directory, maybe it wasn't actually modified.
					// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
					if stat.IsDir() && f.IsDir() {
						if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
							// Both directories are the same, don't record the change
							return nil
						}
					}
					change.Kind = ChangeModify
					break
				}
			}
		}

		// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
		// This block is here to ensure the change is recorded even if the
		// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
		// Check https://github.com/docker/docker/pull/13590 for details.
		if f.IsDir() {
			changedDirs[path] = struct{}{}
		}
		if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
			parent := filepath.Dir(path)
			if _, ok := changedDirs[parent]; !ok && parent != "/" {
				changes = append(changes, Change{Path: parent, Kind: ChangeModify})
				changedDirs[parent] = struct{}{}
			}
		}

		// Record change
		changes = append(changes, change)
		return nil
	})
	if err != nil && !os.IsNotExist(err) {
		return nil, err
	}
	return changes, nil
}

// FileInfo describes the information of a file.
type FileInfo struct {
	parent     *FileInfo
	name       string
	stat       *system.StatT
	children   map[string]*FileInfo
	capability []byte
	added      bool
}

// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	parent := info
	if path == string(os.PathSeparator) {
		return info
	}

	pathElements := strings.Split(path, string(os.PathSeparator))
	for _, elem := range pathElements {
		if elem != "" {
			child := parent.children[elem]
			if child == nil {
				return nil
			}
			parent = child
		}
	}
	return parent
}

func (info *FileInfo) path() string {
	if info.parent == nil {
		// As this runs on the daemon side, file paths are OS specific.
		return string(os.PathSeparator)
	}
	return filepath.Join(info.parent.path(), info.name)
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

	sizeAtEntry := len(*changes)

	if oldInfo == nil {
		// add
		change := Change{
			Path: info.path(),
			Kind: ChangeAdd,
		}
		*changes = append(*changes, change)
		info.added = true
	}

	// We make a copy so we can modify it to detect additions
	// also, we only recurse on the old dir if the new info is a directory
	// otherwise any previous delete/change is considered recursive
	oldChildren := make(map[string]*FileInfo)
	if oldInfo != nil && info.isDir() {
		for k, v := range oldInfo.children {
			oldChildren[k] = v
		}
	}

	for name, newChild := range info.children {
		oldChild, _ := oldChildren[name]
		if oldChild != nil {
			// change?
			oldStat := oldChild.stat
			newStat := newChild.stat
			// Note: We can't compare inode or ctime or blocksize here, because these change
			// when copying a file into a container. However, that is not generally a problem
			// because any content change will change mtime, and any status change should
			// be visible when actually comparing the stat fields. The only time this
			// breaks down is if some code intentionally hides a change by setting
			// back mtime
			if statDifferent(oldStat, newStat) ||
				bytes.Compare(oldChild.capability, newChild.capability) != 0 {
				change := Change{
					Path: newChild.path(),
					Kind: ChangeModify,
				}
				*changes = append(*changes, change)
				newChild.added = true
			}

			// Remove from copy so we can detect deletions
			delete(oldChildren, name)
		}

		newChild.addChanges(oldChild, changes)
	}
	for _, oldChild := range oldChildren {
		// delete
		change := Change{
			Path: oldChild.path(),
			Kind: ChangeDelete,
		}
		*changes = append(*changes, change)
	}

	// If there were changes inside this directory, we need to add it, even if the directory
	// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
	// As this runs on the daemon side, file paths are OS specific.
	if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
		change := Change{
			Path: info.path(),
			Kind: ChangeModify,
		}
		// Let's insert the directory entry before the recently added entries located inside this dir
		*changes = append(*changes, change) // just to resize the slice, will be overwritten
		copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
		(*changes)[sizeAtEntry] = change
	}
}

// Changes returns the list of changes between info and oldInfo.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
	var changes []Change

	info.addChanges(oldInfo, &changes)

	return changes
}

func newRootFileInfo() *FileInfo {
	// As this runs on the daemon side, file paths are OS specific.
	root := &FileInfo{
		name:     string(os.PathSeparator),
		children: make(map[string]*FileInfo),
	}
	return root
}

// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
	var (
		oldRoot, newRoot *FileInfo
	)
	if oldDir == "" {
		emptyDir, err := ioutil.TempDir("", "empty")
		if err != nil {
			return nil, err
		}
		defer os.Remove(emptyDir)
		oldDir = emptyDir
	}
	oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
	if err != nil {
		return nil, err
	}

	return newRoot.Changes(oldRoot), nil
}

// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
	var (
		size int64
		sf   = make(map[uint64]struct{})
	)
	for _, change := range changes {
		if change.Kind == ChangeModify || change.Kind == ChangeAdd {
			file := filepath.Join(newDir, change.Path)
			fileInfo, err := os.Lstat(file)
			if err != nil {
				logrus.Errorf("Can not stat %q: %s", file, err)
				continue
			}

			if fileInfo != nil && !fileInfo.IsDir() {
				if hasHardlinks(fileInfo) {
					inode := getIno(fileInfo)
					if _, ok := sf[inode]; !ok {
						size += fileInfo.Size()
						sf[inode] = struct{}{}
					}
				} else {
					size += fileInfo.Size()
				}
			}
		}
	}
	return size
}

// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
	reader, writer := io.Pipe()
	go func() {
		ta := &tarAppender{
			TarWriter: tar.NewWriter(writer),
			Buffer:    pools.BufioWriter32KPool.Get(nil),
			SeenFiles: make(map[uint64]string),
			UIDMaps:   uidMaps,
			GIDMaps:   gidMaps,
		}
		// this buffer is needed for the duration of this piped stream
		defer pools.BufioWriter32KPool.Put(ta.Buffer)

		sort.Sort(changesByPath(changes))

		// In general we log errors here but ignore them because
		// during e.g. a diff operation the container can continue
		// mutating the filesystem and we can see transient errors
		// from this
		for _, change := range changes {
			if change.Kind == ChangeDelete {
				whiteOutDir := filepath.Dir(change.Path)
				whiteOutBase := filepath.Base(change.Path)
				whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
				timestamp := time.Now()
				hdr := &tar.Header{
					Name:       whiteOut[1:],
					Size:       0,
					ModTime:    timestamp,
					AccessTime: timestamp,
					ChangeTime: timestamp,
				}
				if err := ta.TarWriter.WriteHeader(hdr); err != nil {
					logrus.Debugf("Can't write whiteout header: %s", err)
				}
			} else {
				path := filepath.Join(dir, change.Path)
				if err := ta.addTarFile(path, change.Path[1:]); err != nil {
					logrus.Debugf("Can't add file %s to tar: %s", path, err)
				}
			}
		}

		// Make sure to check the error on Close.
		if err := ta.TarWriter.Close(); err != nil {
			logrus.Debugf("Can't close layer: %s", err)
		}
		if err := writer.Close(); err != nil {
			logrus.Debugf("failed close Changes writer: %s", err)
		}
	}()
	return reader, nil
}
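
A hedged usage sketch of the exported API above (not part of the commit; the
paths are placeholders, and the import assumes the package is available at its
upstream location):

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Compare a "new" layer directory against an "old" one; with oldDir == ""
	// every file would be reported as an Add.
	changes, err := archive.ChangesDirs("/tmp/layer-new", "/tmp/layer-old")
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range changes {
		fmt.Println(c.String()) // e.g. "C /etc", "A /etc/hosts", "D /var/run/x"
	}
	// ChangesSize counts hardlinked files only once, as implemented above.
	fmt.Println("bytes changed:", archive.ChangesSize("/tmp/layer-new", changes))
}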
@@ -0,0 +1,285 @@
package archive

import (
	"bytes"
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"syscall"
	"unsafe"

	"github.com/docker/docker/pkg/system"
)

// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save up to seconds on large
// images.
type walker struct {
	dir1  string
	dir2  string
	root1 *FileInfo
	root2 *FileInfo
}

// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// to generate a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
	w := &walker{
		dir1:  dir1,
		dir2:  dir2,
		root1: newRootFileInfo(),
		root2: newRootFileInfo(),
	}

	i1, err := os.Lstat(w.dir1)
	if err != nil {
		return nil, nil, err
	}
	i2, err := os.Lstat(w.dir2)
	if err != nil {
		return nil, nil, err
	}

	if err := w.walk("/", i1, i2); err != nil {
		return nil, nil, err
	}

	return w.root1, w.root2, nil
}

// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
	if fi == nil {
		return nil
	}
	parent := root.LookUp(filepath.Dir(path))
	if parent == nil {
		return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
	}
	info := &FileInfo{
		name:     filepath.Base(path),
		children: make(map[string]*FileInfo),
		parent:   parent,
	}
	cpath := filepath.Join(dir, path)
	stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
	if err != nil {
		return err
	}
	info.stat = stat
	info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
	parent.children[info.name] = info
	return nil
}

// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
	// Register these nodes with the return trees, unless we're still at the
	// (already-created) roots:
	if path != "/" {
		if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
			return err
		}
		if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
			return err
		}
	}

	is1Dir := i1 != nil && i1.IsDir()
	is2Dir := i2 != nil && i2.IsDir()

	sameDevice := false
	if i1 != nil && i2 != nil {
		si1 := i1.Sys().(*syscall.Stat_t)
		si2 := i2.Sys().(*syscall.Stat_t)
		if si1.Dev == si2.Dev {
			sameDevice = true
		}
	}

	// If these files are both non-existent, or leaves (non-dirs), we are done.
	if !is1Dir && !is2Dir {
		return nil
	}

	// Fetch the names of all the files contained in both directories being walked:
	var names1, names2 []nameIno
	if is1Dir {
		names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}
	if is2Dir {
		names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
		if err != nil {
			return err
		}
	}

	// We have lists of the files contained in both parallel directories, sorted
	// in the same order. Walk them in parallel, generating a unique merged list
	// of all items present in either or both directories.
	var names []string
	ix1 := 0
	ix2 := 0

	for {
		if ix1 >= len(names1) {
			break
		}
		if ix2 >= len(names2) {
			break
		}

		ni1 := names1[ix1]
		ni2 := names2[ix2]

		switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
		case -1: // ni1 < ni2 -- advance ni1
			// we will not encounter ni1 in names2
			names = append(names, ni1.name)
			ix1++
		case 0: // ni1 == ni2
			if ni1.ino != ni2.ino || !sameDevice {
				names = append(names, ni1.name)
			}
			ix1++
			ix2++
		case 1: // ni1 > ni2 -- advance ni2
			// we will not encounter ni2 in names1
			names = append(names, ni2.name)
			ix2++
		}
	}
	for ix1 < len(names1) {
		names = append(names, names1[ix1].name)
		ix1++
	}
	for ix2 < len(names2) {
		names = append(names, names2[ix2].name)
		ix2++
	}

	// For each of the names present in either or both of the directories being
	// iterated, stat the name under each root, and recurse the pair of them:
	for _, name := range names {
		fname := filepath.Join(path, name)
		var cInfo1, cInfo2 os.FileInfo
		if is1Dir {
			cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if is2Dir {
			cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
			if err != nil && !os.IsNotExist(err) {
				return err
			}
		}
		if err = w.walk(fname, cInfo1, cInfo2); err != nil {
			return err
		}
	}
	return nil
}

// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
	name string
	ino  uint64
}

type nameInoSlice []nameIno

func (s nameInoSlice) Len() int           { return len(s) }
func (s nameInoSlice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }

// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
	var (
		size = 100
		buf  = make([]byte, 4096)
		nbuf int
		bufp int
		nb   int
	)

	f, err := os.Open(dirname)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	names = make([]nameIno, 0, size) // Empty with room to grow.
	for {
		// Refill the buffer if necessary
		if bufp >= nbuf {
			bufp = 0
			nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
			if nbuf < 0 {
				nbuf = 0
			}
			if err != nil {
				return nil, os.NewSyscallError("readdirent", err)
			}
			if nbuf <= 0 {
				break // EOF
			}
		}

		// Drain the buffer
		nb, names = parseDirent(buf[bufp:nbuf], names)
		bufp += nb
	}

	sl := nameInoSlice(names)
	sort.Sort(sl)
	return sl, nil
}

// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
	origlen := len(buf)
	for len(buf) > 0 {
		dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
		buf = buf[dirent.Reclen:]
		if dirent.Ino == 0 { // File absent in directory.
			continue
		}
		bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
		var name = string(bytes[0:clen(bytes[:])])
		if name == "." || name == ".." { // Useless names
			continue
		}
		names = append(names, nameIno{name, dirent.Ino})
	}
	return origlen - len(buf), names
}

func clen(n []byte) int {
	for i := 0; i < len(n); i++ {
		if n[i] == 0 {
			return i
		}
	}
	return len(n)
}
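
The heart of walk above is a classic two-pointer merge over the two sorted
name lists. This standalone sketch (not in the diff) isolates that step: names
present in both directories are kept only when their inodes differ or the
directories live on different devices, which is exactly what prunes unchanged
subtrees without extra lstat(2) calls:

package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

func mergeChanged(d1, d2 []nameIno, sameDevice bool) []string {
	var out []string
	i, j := 0, 0
	for i < len(d1) && j < len(d2) {
		switch {
		case d1[i].name < d2[j].name: // only in d1
			out = append(out, d1[i].name)
			i++
		case d1[i].name > d2[j].name: // only in d2
			out = append(out, d2[j].name)
			j++
		default: // in both: keep only if it may have changed
			if d1[i].ino != d2[j].ino || !sameDevice {
				out = append(out, d1[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(d1); i++ {
		out = append(out, d1[i].name)
	}
	for ; j < len(d2); j++ {
		out = append(out, d2[j].name)
	}
	return out
}

func main() {
	old := []nameIno{{"bin", 10}, {"etc", 11}, {"usr", 12}}
	cur := []nameIno{{"etc", 20}, {"home", 21}, {"usr", 12}}
	fmt.Println(mergeChanged(old, cur, true)) // [bin etc home]
}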
@@ -0,0 +1,97 @@
// +build !linux

package archive

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/docker/docker/pkg/system"
)

func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
	var (
		oldRoot, newRoot *FileInfo
		err1, err2       error
		errs             = make(chan error, 2)
	)
	go func() {
		oldRoot, err1 = collectFileInfo(oldDir)
		errs <- err1
	}()
	go func() {
		newRoot, err2 = collectFileInfo(newDir)
		errs <- err2
	}()

	// block until both routines have returned
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return nil, nil, err
		}
	}

	return oldRoot, newRoot, nil
}

func collectFileInfo(sourceDir string) (*FileInfo, error) {
	root := newRootFileInfo()

	err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		// Rebase path
		relPath, err := filepath.Rel(sourceDir, path)
		if err != nil {
			return err
		}

		// As this runs on the daemon side, file paths are OS specific.
		relPath = filepath.Join(string(os.PathSeparator), relPath)

		// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
		// Temporary workaround. If the returned path starts with two backslashes,
		// trim it down to a single backslash. Only relevant on Windows.
		if runtime.GOOS == "windows" {
			if strings.HasPrefix(relPath, `\\`) {
				relPath = relPath[1:]
			}
		}

		if relPath == string(os.PathSeparator) {
			return nil
		}

		parent := root.LookUp(filepath.Dir(relPath))
		if parent == nil {
			return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
		}

		info := &FileInfo{
			name:     filepath.Base(relPath),
			children: make(map[string]*FileInfo),
			parent:   parent,
		}

		s, err := system.Lstat(path)
		if err != nil {
			return err
		}
		info.stat = s

		info.capability, _ = system.Lgetxattr(path, "security.capability")

		parent.children[info.name] = info

		return nil
	})
	if err != nil {
		return nil, err
	}
	return root, nil
}
@@ -0,0 +1,36 @@
// +build !windows

package archive

import (
	"os"
	"syscall"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	if oldStat.Mode() != newStat.Mode() ||
		oldStat.UID() != newStat.UID() ||
		oldStat.GID() != newStat.GID() ||
		oldStat.Rdev() != newStat.Rdev() ||
		// Don't look at size for dirs, it's not a good measure of change
		(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
			(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}

func getIno(fi os.FileInfo) uint64 {
	return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}

func hasHardlinks(fi os.FileInfo) bool {
	return fi.Sys().(*syscall.Stat_t).Nlink > 1
}
@@ -0,0 +1,30 @@
package archive

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
	// Don't look at size for dirs, it's not a good measure of change
	if oldStat.ModTime() != newStat.ModTime() ||
		oldStat.Mode() != newStat.Mode() ||
		oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
		return true
	}
	return false
}

func (info *FileInfo) isDir() bool {
	return info.parent == nil || info.stat.IsDir()
}

func getIno(fi os.FileInfo) (inode uint64) {
	return
}

func hasHardlinks(fi os.FileInfo) bool {
	return false
}
@@ -0,0 +1,458 @@
package archive

import (
	"archive/tar"
	"errors"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/system"
)

// Errors used or returned by this file.
var (
	ErrNotDirectory      = errors.New("not a directory")
	ErrDirNotExists      = errors.New("no such directory")
	ErrCannotCopyDir     = errors.New("cannot copy directory")
	ErrInvalidCopySource = errors.New("invalid copy source content")
)

// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
	// Ensure paths are in platform semantics
	cleanedPath = normalizePath(cleanedPath)
	originalPath = normalizePath(originalPath)

	if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
		if !hasTrailingPathSeparator(cleanedPath) {
			// Add a separator if it doesn't already end with one (a cleaned
			// path would only end in a separator if it is the root).
			cleanedPath += string(filepath.Separator)
		}
		cleanedPath += "."
	}

	if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
		cleanedPath += string(filepath.Separator)
	}

	return cleanedPath
}

// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
	return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}

// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
	return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}

// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
	return filepath.Base(path) == "."
}

// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path but preserves a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
	cleanedPath := filepath.Clean(normalizePath(path))

	if specifiesCurrentDir(path) {
		cleanedPath += string(filepath.Separator) + "."
	}

	return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}

// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
	return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}

// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
	sourcePath = normalizePath(sourcePath)
	if _, err = os.Lstat(sourcePath); err != nil {
		// Catches the case where the source does not exist or is not a
		// directory if asserted to be a directory, as this also causes an
		// error.
		return
	}

	// Separate the source path between its directory and
	// the entry in that directory which we are archiving.
	sourceDir, sourceBase := SplitPathDirEntry(sourcePath)

	filter := []string{sourceBase}

	logrus.Debugf("copying %q from %q", sourceBase, sourceDir)

	return TarWithOptions(sourceDir, &TarOptions{
		Compression:      Uncompressed,
		IncludeFiles:     filter,
		IncludeSourceDir: true,
		RebaseNames: map[string]string{
			sourceBase: rebaseName,
		},
	})
}

// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
	Path       string
	Exists     bool
	IsDir      bool
	RebaseName string
}

// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
	// Normalize the file path and then evaluate the symlink; we will use
	// the target file instead of the symlink if followLink is set.
	path = normalizePath(path)

	resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
	if err != nil {
		return CopyInfo{}, err
	}

	stat, err := os.Lstat(resolvedPath)
	if err != nil {
		return CopyInfo{}, err
	}

	return CopyInfo{
		Path:       resolvedPath,
		Exists:     true,
		IsDir:      stat.IsDir(),
		RebaseName: rebaseName,
	}, nil
}

// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
	maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
	path = normalizePath(path)
	originalPath := path

	stat, err := os.Lstat(path)

	if err == nil && stat.Mode()&os.ModeSymlink == 0 {
		// The path exists and is not a symlink.
		return CopyInfo{
			Path:   path,
			Exists: true,
			IsDir:  stat.IsDir(),
		}, nil
	}

	// While the path is a symlink.
	for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
		if n > maxSymlinkIter {
			// Don't follow symlinks more than this arbitrary number of times.
			return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
		}

		// The path is a symbolic link. We need to evaluate it so that the
		// destination of the copy operation is the link target and not the
		// link itself. This is notably different than CopyInfoSourcePath which
		// only evaluates symlinks before the last appearing path separator.
		// Also note that it is okay if the last path element is a broken
		// symlink as the copy operation should create the target.
		var linkTarget string

		linkTarget, err = os.Readlink(path)
		if err != nil {
			return CopyInfo{}, err
		}

		if !system.IsAbs(linkTarget) {
			// Join with the parent directory.
			dstParent, _ := SplitPathDirEntry(path)
			linkTarget = filepath.Join(dstParent, linkTarget)
		}

		path = linkTarget
		stat, err = os.Lstat(path)
	}

	if err != nil {
		// It's okay if the destination path doesn't exist. We can still
		// continue the copy operation if the parent directory exists.
		if !os.IsNotExist(err) {
			return CopyInfo{}, err
		}

		// Ensure destination parent dir exists.
		dstParent, _ := SplitPathDirEntry(path)

		parentDirStat, err := os.Lstat(dstParent)
		if err != nil {
			return CopyInfo{}, err
		}
		if !parentDirStat.IsDir() {
			return CopyInfo{}, ErrNotDirectory
		}

		return CopyInfo{Path: path}, nil
	}

	// The path exists after resolving symlinks.
	return CopyInfo{
		Path:   path,
		Exists: true,
		IsDir:  stat.IsDir(),
	}, nil
}

// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
	// Ensure in platform semantics
	srcInfo.Path = normalizePath(srcInfo.Path)
	dstInfo.Path = normalizePath(dstInfo.Path)

	// Separate the destination path between its directory and base
	// components in case the source archive contents need to be rebased.
	dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
	_, srcBase := SplitPathDirEntry(srcInfo.Path)

	switch {
	case dstInfo.Exists && dstInfo.IsDir:
		// The destination exists as a directory. No alteration
		// to srcContent is needed as its contents can be
		// simply extracted to the destination directory.
		return dstInfo.Path, ioutil.NopCloser(srcContent), nil
	case dstInfo.Exists && srcInfo.IsDir:
		// The destination exists as some type of file and the source
		// content is a directory. This is an error condition since
		// you cannot copy a directory to an existing file location.
		return "", nil, ErrCannotCopyDir
	case dstInfo.Exists:
		// The destination exists as some type of file and the source content
		// is also a file. The source content entry will have to be renamed to
		// have a basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case srcInfo.IsDir:
		// The destination does not exist and the source content is an archive
		// of a directory. The archive should be extracted to the parent of
		// the destination path instead, and when it is, the directory that is
		// created as a result should take the name of the destination path.
		// The source content entries will have to be renamed to have a
		// basename which matches the destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	case assertsDirectory(dstInfo.Path):
		// The destination does not exist and is asserted to be created as a
		// directory, but the source content is not a directory. This is an
		// error condition since you cannot create a directory from a file
		// source.
		return "", nil, ErrDirNotExists
	default:
		// The last remaining case is when the destination does not exist, is
		// not asserted to be a directory, and the source content is not an
		// archive of a directory. In this case, the destination file will need
		// to be created when the archive is extracted and the source content
		// entry will have to be renamed to have a basename which matches the
		// destination path's basename.
		if len(srcInfo.RebaseName) != 0 {
			srcBase = srcInfo.RebaseName
		}
		return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
	}
}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
	if oldBase == string(os.PathSeparator) {
		// If oldBase specifies the root directory, use an empty string as
		// oldBase instead so that newBase doesn't replace the path separator
		// that all paths will start with.
		oldBase = ""
	}

	rebased, w := io.Pipe()

	go func() {
		srcTar := tar.NewReader(srcContent)
		rebasedTar := tar.NewWriter(w)

		for {
			hdr, err := srcTar.Next()
			if err == io.EOF {
				// Signals end of archive.
				rebasedTar.Close()
				w.Close()
				return
			}
			if err != nil {
				w.CloseWithError(err)
				return
			}

			hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)

			if err = rebasedTar.WriteHeader(hdr); err != nil {
				w.CloseWithError(err)
				return
			}

			if _, err = io.Copy(rebasedTar, srcTar); err != nil {
				w.CloseWithError(err)
				return
			}
		}
	}()

	return rebased
}

// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
	var (
		srcInfo CopyInfo
		err     error
	)

	// Ensure in platform semantics
	srcPath = normalizePath(srcPath)
	dstPath = normalizePath(dstPath)

	// Clean the source and destination paths.
	srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
	dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)

	if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
		return err
	}

	content, err := TarResource(srcInfo)
	if err != nil {
		return err
	}
	defer content.Close()

	return CopyTo(content, srcInfo, dstPath)
}

// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
	// The destination path need not exist, but CopyInfoDestinationPath will
	// ensure that at least the parent directory exists.
	dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
	if err != nil {
		return err
	}

	dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
	if err != nil {
		return err
	}
	defer copyArchive.Close()

	options := &TarOptions{
		NoLchown:             true,
		NoOverwriteDirNonDir: true,
	}

	return Untar(copyArchive, dstDir, options)
}

// ResolveHostSourcePath decides which real path needs to be copied, depending
// on whether symlinks should be followed: if followLink is true, resolvedPath
// is the link target of any symlink file; otherwise only symlinks in the
// parent directory are resolved, and a symlink file itself is returned
// without resolving.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
	if followLink {
		resolvedPath, err = filepath.EvalSymlinks(path)
		if err != nil {
			return
		}

		resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
	} else {
		dirPath, basePath := filepath.Split(path)

		// If not following the symlink, resolve any symlink in the parent dir.
		var resolvedDirPath string
		resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
		if err != nil {
			return
		}
		// resolvedDirPath will have been cleaned (no trailing path separators) so
		// we can manually join it with the base path element.
		resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
		if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
			rebaseName = filepath.Base(path)
		}
	}
	return resolvedPath, rebaseName, nil
}

// GetRebaseName normalizes and compares path and resolvedPath, returning the
// completed resolved path and the rebased file name.
func GetRebaseName(path, resolvedPath string) (string, string) {
	// linkTarget will have been cleaned (no trailing path separators and dot) so
	// we can manually join it with them
	var rebaseName string
	if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
		resolvedPath += string(filepath.Separator) + "."
	}

	if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
		resolvedPath += string(filepath.Separator)
	}

	if filepath.Base(path) != filepath.Base(resolvedPath) {
		// In the case where the path had a trailing separator and a symlink
		// evaluation has changed the last path component, we will need to
		// rebase the name in the archive that is being copied to match the
		// originally requested name.
		rebaseName = filepath.Base(path)
	}
	return resolvedPath, rebaseName
}
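
A short sketch (not in the diff) of the trailing-separator semantics the copy
helpers above preserve; the paths are arbitrary examples, and the import
assumes the package is available at its upstream location:

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// filepath.Clean drops a trailing separator, losing the "this must be a
	// directory" assertion; PreserveTrailingDotOrSeparator restores it.
	orig := "/var/log/"
	cleaned := filepath.Clean(orig)                                     // "/var/log"
	fmt.Println(archive.PreserveTrailingDotOrSeparator(cleaned, orig)) // "/var/log/"

	// SplitPathDirEntry keeps a trailing "." so that copying "dir/." means
	// "the contents of dir" rather than dir itself.
	dir, base := archive.SplitPathDirEntry("/var/log/.")
	fmt.Println(dir, base) // "/var/log" "."
}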
@@ -0,0 +1,11 @@
// +build !windows

package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.ToSlash(path)
}
@@ -0,0 +1,9 @@
package archive

import (
	"path/filepath"
)

func normalizePath(path string) string {
	return filepath.FromSlash(path)
}
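
The two normalizePath variants above simply wrap the standard library; this
quick illustration (not in the diff) shows the underlying calls. Both are
no-ops for paths already in the platform's native form, so the conversion
only matters on Windows:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// On Windows, FromSlash("c:/foo/bar") yields `c:\foo\bar`; on Unix both
	// calls return their input unchanged.
	fmt.Println(filepath.FromSlash("c:/foo/bar"))
	fmt.Println(filepath.ToSlash(`c:\foo\bar`))
}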
@ -0,0 +1,279 @@
package archive

import (
	"archive/tar"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/idtools"
	"github.com/docker/docker/pkg/pools"
	"github.com/docker/docker/pkg/system"
)

// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
	tr := tar.NewReader(layer)
	trBuf := pools.BufioReader32KPool.Get(tr)
	defer pools.BufioReader32KPool.Put(trBuf)

	var dirs []*tar.Header
	unpackedPaths := make(map[string]struct{})

	if options == nil {
		options = &TarOptions{}
	}
	if options.ExcludePatterns == nil {
		options.ExcludePatterns = []string{}
	}
	remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
	if err != nil {
		return 0, err
	}

	aufsTempdir := ""
	aufsHardlinks := make(map[string]*tar.Header)

	if options == nil {
		options = &TarOptions{}
	}
	// Iterate through the files in the archive.
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			// end of tar archive
			break
		}
		if err != nil {
			return 0, err
		}

		size += hdr.Size

		// Normalize name, for safety and for a simple is-root check
		hdr.Name = filepath.Clean(hdr.Name)

		// Windows does not support filenames with colons in them. Ignore
		// these files. This is not a problem though (although it might
		// appear that it is). Let's suppose a client is running docker pull.
		// The daemon it points to is Windows. Would it make sense for the
		// client to be doing a docker pull Ubuntu for example (which has files
		// with colons in the name under /usr/share/man/man3)? No, absolutely
		// not as it would really only make sense that they were pulling a
		// Windows image. However, for development, it is necessary to be able
		// to pull Linux images which are in the repository.
		//
		// TODO Windows. Once the registry is aware of what images are Windows-
		// specific or Linux-specific, this warning should be changed to an error
		// to cater for the situation where someone does manage to upload a Linux
		// image but have it tagged as Windows inadvertently.
		if runtime.GOOS == "windows" {
			if strings.Contains(hdr.Name, ":") {
				logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
				continue
			}
		}

		// Note: as these operations are platform specific, so must the slash be.
		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
			// Not the root directory, ensure that the parent directory exists.
			// This happened in some tests where an image had a tarfile without any
			// parent directories.
			parent := filepath.Dir(hdr.Name)
			parentPath := filepath.Join(dest, parent)

			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
				err = system.MkdirAll(parentPath, 0600)
				if err != nil {
					return 0, err
				}
			}
		}

		// Skip AUFS metadata dirs
		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
			// Regular files inside /.wh..wh.plnk can be used as hardlink targets
			// We don't want this directory, but we need the files in them so that
			// such hardlinks can be resolved.
			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
				basename := filepath.Base(hdr.Name)
				aufsHardlinks[basename] = hdr
				if aufsTempdir == "" {
					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
						return 0, err
					}
					defer os.RemoveAll(aufsTempdir)
				}
				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
					return 0, err
				}
			}

			if hdr.Name != WhiteoutOpaqueDir {
				continue
			}
		}
		path := filepath.Join(dest, hdr.Name)
		rel, err := filepath.Rel(dest, path)
		if err != nil {
			return 0, err
		}

		// Note: as these operations are platform specific, so must the slash be.
		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
		}
		base := filepath.Base(path)

		if strings.HasPrefix(base, WhiteoutPrefix) {
			dir := filepath.Dir(path)
			if base == WhiteoutOpaqueDir {
				_, err := os.Lstat(dir)
				if err != nil {
					return 0, err
				}
				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
					if err != nil {
						if os.IsNotExist(err) {
							err = nil // parent was deleted
						}
						return err
					}
					if path == dir {
						return nil
					}
					if _, exists := unpackedPaths[path]; !exists {
						err := os.RemoveAll(path)
						return err
					}
					return nil
				})
				if err != nil {
					return 0, err
				}
			} else {
				originalBase := base[len(WhiteoutPrefix):]
				originalPath := filepath.Join(dir, originalBase)
				if err := os.RemoveAll(originalPath); err != nil {
					return 0, err
				}
			}
		} else {
			// If path exists we almost always just want to remove and replace it.
			// The only exception is when it is a directory *and* the file from
			// the layer is also a directory. Then we want to merge them (i.e.
			// just apply the metadata from the layer).
			if fi, err := os.Lstat(path); err == nil {
				if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
					if err := os.RemoveAll(path); err != nil {
						return 0, err
					}
				}
			}

			trBuf.Reset(tr)
			srcData := io.Reader(trBuf)
			srcHdr := hdr

			// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
			// we manually retarget these into the temporary files we extracted them into
			if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
				linkBasename := filepath.Base(hdr.Linkname)
				srcHdr = aufsHardlinks[linkBasename]
				if srcHdr == nil {
					return 0, fmt.Errorf("Invalid aufs hardlink")
				}
				tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
				if err != nil {
					return 0, err
				}
				defer tmpFile.Close()
				srcData = tmpFile
			}

			// if the options contain a uid & gid maps, convert header uid/gid
			// entries using the maps such that lchown sets the proper mapped
			// uid/gid after writing the file. We only perform this mapping if
			// the file isn't already owned by the remapped root UID or GID, as
			// that specific uid/gid has no mapping from container -> host, and
			// those files already have the proper ownership for inside the
			// container.
			if srcHdr.Uid != remappedRootUID {
				xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Uid = xUID
			}
			if srcHdr.Gid != remappedRootGID {
				xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
				if err != nil {
					return 0, err
				}
				srcHdr.Gid = xGID
			}
			if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
				return 0, err
			}

			// Directory mtimes must be handled at the end to avoid further
			// file creation in them to modify the directory mtime
			if hdr.Typeflag == tar.TypeDir {
				dirs = append(dirs, hdr)
			}
			unpackedPaths[path] = struct{}{}
		}
	}

	for _, hdr := range dirs {
		path := filepath.Join(dest, hdr.Name)
		if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
			return 0, err
		}
	}

	return size, nil
}

// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer Reader) (int64, error) {
	return applyLayerHandler(dest, layer, &TarOptions{}, true)
}

// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
	return applyLayerHandler(dest, layer, options, false)
}

// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
	dest = filepath.Clean(dest)

	// We need to be able to set any perms
	oldmask, err := system.Umask(0)
	if err != nil {
		return 0, err
	}
	defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform

	if decompress {
		layer, err = DecompressStream(layer)
		if err != nil {
			return 0, err
		}
	}
	return UnpackLayer(dest, layer, options)
}
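A short usage sketch for ApplyLayer (the file name and destination directory are hypothetical):

package main

import (
	"log"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// layer.tar holds a layer diff in the standard format; ApplyLayer
	// decompresses the stream itself when it is compressed.
	f, err := os.Open("layer.tar")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	size, err := archive.ApplyLayer("/tmp/demo-rootfs", f)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("applied %d bytes of layer content", size)
}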
@ -0,0 +1,97 @@
// +build ignore

// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main

import (
	"flag"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path"

	"github.com/Sirupsen/logrus"
	"github.com/docker/docker/pkg/archive"
)

var (
	flDebug  = flag.Bool("D", false, "debugging output")
	flNewDir = flag.String("newdir", "", "")
	flOldDir = flag.String("olddir", "", "")
	log      = logrus.New()
)

func main() {
	flag.Usage = func() {
		fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
		fmt.Printf("%s [OPTIONS]\n", os.Args[0])
		flag.PrintDefaults()
	}
	flag.Parse()
	log.Out = os.Stderr
	if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
		logrus.SetLevel(logrus.DebugLevel)
	}
	var newDir, oldDir string

	if len(*flNewDir) == 0 {
		var err error
		newDir, err = ioutil.TempDir("", "docker-test-newDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(newDir)
		if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
			log.Fatal(err)
		}
	} else {
		newDir = *flNewDir
	}

	if len(*flOldDir) == 0 {
		// assign to the outer oldDir rather than shadowing it with :=,
		// otherwise ChangesDirs below would see an empty string
		var err error
		oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
		if err != nil {
			log.Fatal(err)
		}
		defer os.RemoveAll(oldDir)
	} else {
		oldDir = *flOldDir
	}

	changes, err := archive.ChangesDirs(newDir, oldDir)
	if err != nil {
		log.Fatal(err)
	}

	a, err := archive.ExportChanges(newDir, changes)
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	i, err := io.Copy(os.Stdout, a)
	if err != nil && err != io.EOF {
		log.Fatal(err)
	}
	fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}

func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
	fileData := []byte("fooo")
	for n := 0; n < numberOfFiles; n++ {
		fileName := fmt.Sprintf("file-%d", n)
		if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
			return 0, err
		}
		if makeLinks {
			if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
				return 0, err
			}
		}
	}
	totalSize := numberOfFiles * len(fileData)
	return totalSize, nil
}
@ -0,0 +1,16 @@
package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	if time.IsZero() {
		// Return UTIME_OMIT special value
		ts.Sec = 0
		ts.Nsec = ((1 << 30) - 2)
		return
	}
	return syscall.NsecToTimespec(time.UnixNano())
}
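The magic Nsec value above is the kernel's UTIME_OMIT sentinel from <linux/stat.h>, which tells utimensat(2) to leave that timestamp unchanged; a one-liner confirms the arithmetic:

package main

import "fmt"

func main() {
	// UTIME_OMIT is defined in the kernel headers as ((1l << 30) - 2).
	fmt.Println((1 << 30) - 2) // 1073741822
}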
@ -0,0 +1,16 @@
// +build !linux

package archive

import (
	"syscall"
	"time"
)

func timeToTimespec(time time.Time) (ts syscall.Timespec) {
	nsec := int64(0)
	if !time.IsZero() {
		nsec = time.UnixNano()
	}
	return syscall.NsecToTimespec(nsec)
}
@ -0,0 +1,23 @@
package archive

// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.

// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."

// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix

// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"

// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
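To make the constants concrete, this sketch (with an invented filename) shows how the whiteout names compose:

package main

import "fmt"

const (
	whPrefix     = ".wh."
	whMetaPrefix = whPrefix + whPrefix
)

func main() {
	// Deleting etc/passwd in a layer is recorded as a sibling whiteout file:
	fmt.Println(whPrefix + "passwd") // .wh.passwd
	// AUFS metadata entries carry the doubled prefix:
	fmt.Println(whMetaPrefix + "plnk") // .wh..wh.plnk
	fmt.Println(whMetaPrefix + ".opq") // .wh..wh..opq
}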
@ -0,0 +1,59 @@
package archive

import (
	"archive/tar"
	"bytes"
	"io/ioutil"
)

// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (Archive, error) {
	files := parseStringPairs(input...)
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for _, file := range files {
		name, content := file[0], file[1]
		hdr := &tar.Header{
			Name: name,
			Size: int64(len(content)),
		}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	if err := tw.Close(); err != nil {
		return nil, err
	}
	return ioutil.NopCloser(buf), nil
}

func parseStringPairs(input ...string) (output [][2]string) {
	output = make([][2]string, 0, len(input)/2+1)
	for i := 0; i < len(input); i += 2 {
		var pair [2]string
		pair[0] = input[i]
		if i+1 < len(input) {
			pair[1] = input[i+1]
		}
		output = append(output, pair)
	}
	return
}
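A usage sketch for Generate, reading the produced archive back with the standard library:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// One complete pair plus a trailing name, which becomes an empty file.
	a, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		log.Fatal(err)
	}
	defer a.Close()

	tr := tar.NewReader(a)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}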
@ -0,0 +1,283 @@
package fileutils

import (
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"text/scanner"

	"github.com/Sirupsen/logrus"
)

// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
	return pattern[0] == '!'
}

// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
	return pattern == ""
}

// CleanPatterns takes a slice of patterns and returns a new
// slice of patterns cleaned with filepath.Clean, stripped
// of any empty patterns and lets the caller know whether the
// slice contains any exception patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
	// Loop over exclusion patterns and:
	// 1. Clean them up.
	// 2. Indicate whether we are dealing with any exception rules.
	// 3. Error if we see a single exclusion marker on its own (!).
	cleanedPatterns := []string{}
	patternDirs := [][]string{}
	exceptions := false
	for _, pattern := range patterns {
		// Eliminate leading and trailing whitespace.
		pattern = strings.TrimSpace(pattern)
		if empty(pattern) {
			continue
		}
		if exclusion(pattern) {
			if len(pattern) == 1 {
				return nil, nil, false, errors.New("Illegal exclusion pattern: !")
			}
			exceptions = true
		}
		pattern = filepath.Clean(pattern)
		cleanedPatterns = append(cleanedPatterns, pattern)
		if exclusion(pattern) {
			pattern = pattern[1:]
		}
		patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
	}

	return cleanedPatterns, patternDirs, exceptions, nil
}

// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
	file = filepath.Clean(file)

	if file == "." {
		// Don't let them exclude everything, kind of silly.
		return false, nil
	}

	patterns, patDirs, _, err := CleanPatterns(patterns)
	if err != nil {
		return false, err
	}

	return OptimizedMatches(file, patterns, patDirs)
}

// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
	matched := false
	file = filepath.FromSlash(file)
	parentPath := filepath.Dir(file)
	parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))

	for i, pattern := range patterns {
		negative := false

		if exclusion(pattern) {
			negative = true
			pattern = pattern[1:]
		}

		match, err := regexpMatch(pattern, file)
		if err != nil {
			return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
		}

		if !match && parentPath != "." {
			// Check to see if the pattern matches one of our parent dirs.
			if len(patDirs[i]) <= len(parentPathDirs) {
				match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
					strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
			}
		}

		if match {
			matched = !negative
		}
	}

	if matched {
		logrus.Debugf("Skipping excluded path: %s", file)
	}

	return matched, nil
}

// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in golang's filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
	regStr := "^"

	// Do some syntax checking on the pattern.
	// filepath's Match() has some really weird rules that are inconsistent
	// so instead of trying to dup their logic, just call Match() for its
	// error state and if there is an error in the pattern return it.
	// If this becomes an issue we can remove this since it's really only
	// needed in the error (syntax) case - which isn't really critical.
	if _, err := filepath.Match(pattern, path); err != nil {
		return false, err
	}

	// Go through the pattern and convert it to a regexp.
	// We use a scanner so we can support utf-8 chars.
	var scan scanner.Scanner
	scan.Init(strings.NewReader(pattern))

	sl := string(os.PathSeparator)
	escSL := sl
	if sl == `\` {
		escSL += `\`
	}

	for scan.Peek() != scanner.EOF {
		ch := scan.Next()

		if ch == '*' {
			if scan.Peek() == '*' {
				// is some flavor of "**"
				scan.Next()

				if scan.Peek() == scanner.EOF {
					// is "**EOF" - to align with .gitignore just accept all
					regStr += ".*"
				} else {
					// is "**"
					regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
				}

				// Treat **/ as ** so eat the "/"
				if string(scan.Peek()) == sl {
					scan.Next()
				}
			} else {
				// is "*" so map it to anything but "/"
				regStr += "[^" + escSL + "]*"
			}
		} else if ch == '?' {
			// "?" is any char except "/"
			regStr += "[^" + escSL + "]"
		} else if strings.Index(".$", string(ch)) != -1 {
			// Escape some regexp special chars that have no meaning
			// in golang's filepath.Match
			regStr += `\` + string(ch)
		} else if ch == '\\' {
			// escape next char. Note that a trailing \ in the pattern
			// will be left alone (but need to escape it)
			if sl == `\` {
				// On windows map "\" to "\\", meaning an escaped backslash,
				// and then just continue because filepath.Match on
				// Windows doesn't allow escaping at all
				regStr += escSL
				continue
			}
			if scan.Peek() != scanner.EOF {
				regStr += `\` + string(scan.Next())
			} else {
				regStr += `\`
			}
		} else {
			regStr += string(ch)
		}
	}

	regStr += "$"

	res, err := regexp.MatchString(regStr, path)

	// Map regexp's error to filepath's so no one knows we're not using filepath
	if err != nil {
		err = filepath.ErrBadPattern
	}

	return res, err
}

// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
	cleanSrc := filepath.Clean(src)
	cleanDst := filepath.Clean(dst)
	if cleanSrc == cleanDst {
		return 0, nil
	}
	sf, err := os.Open(cleanSrc)
	if err != nil {
		return 0, err
	}
	defer sf.Close()
	if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
		return 0, err
	}
	df, err := os.Create(cleanDst)
	if err != nil {
		return 0, err
	}
	defer df.Close()
	return io.Copy(df, sf)
}

// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link may not be a file.
func ReadSymlinkedDirectory(path string) (string, error) {
	var realPath string
	var err error
	if realPath, err = filepath.Abs(path); err != nil {
		return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
	}
	if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
		return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
	}
	realPathInfo, err := os.Stat(realPath)
	if err != nil {
		return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
	}
	if !realPathInfo.Mode().IsDir() {
		return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
	}
	return realPath, nil
}

// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}
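A usage sketch for Matches with an exception pattern (the file names are invented); a true result means the file is excluded:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// Later "!" rules re-include files matched by earlier rules,
	// mirroring .dockerignore semantics.
	patterns := []string{"docs/**", "!docs/README.md"}

	for _, f := range []string{"docs/guide.md", "docs/README.md", "main.go"} {
		excluded, err := fileutils.Matches(f, patterns)
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%-15s excluded=%v\n", f, excluded)
	}
}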
@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds Returns the number of used File Descriptors.
// On Solaris these limits are per process and not systemwide
func GetTotalUsedFds() int {
	return -1
}
@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils

import (
	"fmt"
	"io/ioutil"
	"os"

	"github.com/Sirupsen/logrus"
)

// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
	if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
		logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
	} else {
		return len(fds)
	}
	return -1
}
@ -0,0 +1,7 @@
package fileutils

// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
	return -1
}
@ -0,0 +1,39 @@
package homedir

import (
	"os"
	"runtime"

	"github.com/opencontainers/runc/libcontainer/user"
)

// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
	if runtime.GOOS == "windows" {
		return "USERPROFILE"
	}
	return "HOME"
}

// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
	home := os.Getenv(Key())
	if home == "" && runtime.GOOS != "windows" {
		if u, err := user.CurrentUser(); err == nil {
			return u.Home
		}
	}
	return home
}

// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
	if runtime.GOOS == "windows" {
		return "%USERPROFILE%" // be careful while using in format functions
	}
	return "~"
}
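A usage sketch for the homedir helpers (the config path is just an example):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// Key() is "HOME" on Unix and "USERPROFILE" on Windows; Get() falls
	// back to the passwd database on Unix when the variable is unset.
	cfg := filepath.Join(homedir.Get(), ".docker", "config.json")
	fmt.Println(homedir.Key(), cfg)
}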
@ -0,0 +1,197 @@
package idtools

import (
	"bufio"
	"fmt"
	"os"
	"sort"
	"strconv"
	"strings"
)

// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
	ContainerID int `json:"container_id"`
	HostID      int `json:"host_id"`
	Size        int `json:"size"`
}

type subIDRange struct {
	Start  int
	Length int
}

type ranges []subIDRange

func (e ranges) Len() int           { return len(e) }
func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }

const (
	subuidFileName string = "/etc/subuid"
	subgidFileName string = "/etc/subgid"
)

// MkdirAllAs creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}

// MkdirAllNewAs creates a directory (include any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}

// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}

// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
	var uid, gid int

	if uidMap != nil {
		xUID, err := ToHost(0, uidMap)
		if err != nil {
			return -1, -1, err
		}
		uid = xUID
	}
	if gidMap != nil {
		xGID, err := ToHost(0, gidMap)
		if err != nil {
			return -1, -1, err
		}
		gid = xGID
	}
	return uid, gid, nil
}

// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToContainer(hostID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return hostID, nil
	}
	for _, m := range idMap {
		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
			contID := m.ContainerID + (hostID - m.HostID)
			return contID, nil
		}
	}
	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}

// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToHost(contID int, idMap []IDMap) (int, error) {
	if idMap == nil {
		return contID, nil
	}
	for _, m := range idMap {
		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
			hostID := m.HostID + (contID - m.ContainerID)
			return hostID, nil
		}
	}
	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}

// CreateIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
	subuidRanges, err := parseSubuid(username)
	if err != nil {
		return nil, nil, err
	}
	subgidRanges, err := parseSubgid(groupname)
	if err != nil {
		return nil, nil, err
	}
	if len(subuidRanges) == 0 {
		return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
	}
	if len(subgidRanges) == 0 {
		return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
	}

	return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}

func createIDMap(subidRanges ranges) []IDMap {
	idMap := []IDMap{}

	// sort the ranges by lowest ID first
	sort.Sort(subidRanges)
	containerID := 0
	for _, idrange := range subidRanges {
		idMap = append(idMap, IDMap{
			ContainerID: containerID,
			HostID:      idrange.Start,
			Size:        idrange.Length,
		})
		containerID = containerID + idrange.Length
	}
	return idMap
}

func parseSubuid(username string) (ranges, error) {
	return parseSubidFile(subuidFileName, username)
}

func parseSubgid(username string) (ranges, error) {
	return parseSubidFile(subgidFileName, username)
}

// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
	var rangeList ranges

	subidFile, err := os.Open(path)
	if err != nil {
		return rangeList, err
	}
	defer subidFile.Close()

	s := bufio.NewScanner(subidFile)
	for s.Scan() {
		if err := s.Err(); err != nil {
			return rangeList, err
		}

		text := strings.TrimSpace(s.Text())
		if text == "" || strings.HasPrefix(text, "#") {
			continue
		}
		parts := strings.Split(text, ":")
		if len(parts) != 3 {
			return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
		}
		if parts[0] == username || username == "ALL" {
			startid, err := strconv.Atoi(parts[1])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			length, err := strconv.Atoi(parts[2])
			if err != nil {
				return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
			}
			rangeList = append(rangeList, subIDRange{startid, length})
		}
	}
	return rangeList, nil
}
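To see the translation functions in action, a sketch with a single invented remapping range:

package main

import (
	"fmt"
	"log"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Container IDs 0-65535 are backed by host IDs 100000-165535.
	idMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	hostID, err := idtools.ToHost(0, idMap) // container root -> host 100000
	if err != nil {
		log.Fatal(err)
	}
	contID, err := idtools.ToContainer(100001, idMap) // host 100001 -> container 1
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(hostID, contID) // 100000 1
}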
@ -0,0 +1,60 @@
// +build !windows

package idtools

import (
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/system"
)

func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	// make an array containing the original path asked for, plus (for mkAll == true)
	// all path components leading up to the complete path that don't exist before we MkdirAll
	// so that we can chown all of them properly at the end. If chownExisting is false, we won't
	// chown the full directory path if it exists
	var paths []string
	if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
		paths = []string{path}
	} else if err == nil && chownExisting {
		if err := os.Chown(path, ownerUID, ownerGID); err != nil {
			return err
		}
		// short-circuit--we were called with an existing directory and chown was requested
		return nil
	} else if err == nil {
		// nothing to do; directory path fully exists already and chown was NOT requested
		return nil
	}

	if mkAll {
		// walk back to "/" looking for directories which do not exist
		// and add them to the paths array for chown after creation
		dirPath := path
		for {
			dirPath = filepath.Dir(dirPath)
			if dirPath == "/" {
				break
			}
			if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
				paths = append(paths, dirPath)
			}
		}
		if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	} else {
		if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
			return err
		}
	}
	// even if it existed, we will chown the requested path + any subpaths that
	// didn't exist when we called MkdirAll
	for _, pathComponent := range paths {
		if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
			return err
		}
	}
	return nil
}
@ -0,0 +1,18 @@
// +build windows

package idtools

import (
	"os"

	"github.com/docker/docker/pkg/system"
)

// Platforms such as Windows do not support the UID/GID concept. So make this
// just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
	if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
		return err
	}
	return nil
}
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go (generated, vendored, new file, 188 lines)
@ -0,0 +1,188 @@
package idtools

import (
	"fmt"
	"os/exec"
	"path/filepath"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
)

// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>

var (
	once        sync.Once
	userCommand string

	cmdTemplates = map[string]string{
		"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
		"useradd": "-r -s /bin/false %s",
		"usermod": "-%s %d-%d %s",
	}

	idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
	// default length for a UID/GID subordinate range
	defaultRangeLen   = 65536
	defaultRangeStart = 100000
	userMod           = "usermod"
)

func resolveBinary(binname string) (string, error) {
	binaryPath, err := exec.LookPath(binname)
	if err != nil {
		return "", err
	}
	resolvedPath, err := filepath.EvalSymlinks(binaryPath)
	if err != nil {
		return "", err
	}
	//only return no error if the final resolved binary basename
	//matches what was searched for
	if filepath.Base(resolvedPath) == binname {
		return resolvedPath, nil
	}
	return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}

// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
	if err := addUser(name); err != nil {
		return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
	}

	// Query the system for the created uid and gid pair
	out, err := execCmd("id", name)
	if err != nil {
		return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
	}
	matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
	if len(matches) != 3 {
		return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
	}
	uid, err := strconv.Atoi(matches[1])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
	}
	gid, err := strconv.Atoi(matches[2])
	if err != nil {
		return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
	}

	// Now we need to create the subuid/subgid ranges for our new user/group (system users
	// do not get auto-created ranges in subuid/subgid)

	if err := createSubordinateRanges(name); err != nil {
		return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
	}
	return uid, gid, nil
}

func addUser(userName string) error {
	once.Do(func() {
		// set up which commands are used for adding users/groups dependent on distro
		if _, err := resolveBinary("adduser"); err == nil {
			userCommand = "adduser"
		} else if _, err := resolveBinary("useradd"); err == nil {
			userCommand = "useradd"
		}
	})
	if userCommand == "" {
		return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
	}
	args := fmt.Sprintf(cmdTemplates[userCommand], userName)
	out, err := execCmd(userCommand, args)
	if err != nil {
		return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
	}
	return nil
}

func createSubordinateRanges(name string) error {

	// first, we should verify that ranges weren't automatically created
	// by the distro tooling
	ranges, err := parseSubuid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no UID ranges; let's create one
		startID, err := findNextUIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subuid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}

	ranges, err = parseSubgid(name)
	if err != nil {
		return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
	}
	if len(ranges) == 0 {
		// no GID ranges; let's create one
		startID, err := findNextGIDRange()
		if err != nil {
			return fmt.Errorf("Can't find available subgid range: %v", err)
		}
		out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
		if err != nil {
			return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
		}
	}
	return nil
}

func findNextUIDRange() (int, error) {
	ranges, err := parseSubuid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextGIDRange() (int, error) {
	ranges, err := parseSubgid("ALL")
	if err != nil {
		return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
	}
	sort.Sort(ranges)
	return findNextRangeStart(ranges)
}

func findNextRangeStart(rangeList ranges) (int, error) {
	startID := defaultRangeStart
	for _, arange := range rangeList {
		if wouldOverlap(arange, startID) {
			startID = arange.Start + arange.Length
		}
	}
	return startID, nil
}

func wouldOverlap(arange subIDRange, ID int) bool {
	low := ID
	high := ID + defaultRangeLen
	if (low >= arange.Start && low <= arange.Start+arange.Length) ||
		(high <= arange.Start+arange.Length && high >= arange.Start) {
		return true
	}
	return false
}

func execCmd(cmd, args string) ([]byte, error) {
	execCmd := exec.Command(cmd, strings.Split(args, " ")...)
	return execCmd.CombinedOutput()
}
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_unsupported.go (generated, vendored, new file, 12 lines)
@ -0,0 +1,12 @@
// +build !linux

package idtools

import "fmt"

// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
// and calls the appropriate helper function to add the group and then
// the user to the group in /etc/group and /etc/passwd respectively.
func AddNamespaceRangesUser(name string) (int, int, error) {
	return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}
@ -0,0 +1,51 @@
package ioutils

import (
	"errors"
	"io"
)

var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n

	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func (b *fixedBuffer) Read(p []byte) (int, error) {
	n := copy(p, b.buf[b.lastRead:b.pos])
	b.lastRead += n
	return n, nil
}

func (b *fixedBuffer) Len() int {
	return b.pos - b.lastRead
}

func (b *fixedBuffer) Cap() int {
	return cap(b.buf)
}

func (b *fixedBuffer) Reset() {
	b.pos = 0
	b.lastRead = 0
	b.buf = b.buf[:0]
}

func (b *fixedBuffer) String() string {
	return string(b.buf[b.lastRead:b.pos])
}
@ -0,0 +1,185 @@
|
||||||
|
package ioutils
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"io"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
// maxCap is the highest capacity to use in byte slices that buffer data.
|
||||||
|
const maxCap = 1e6
|
||||||
|
|
||||||
|
// minCap is the lowest capacity to use in byte slices that buffer data
|
||||||
|
const minCap = 64
|
||||||
|
|
||||||
|
// blockThreshold is the minimum number of bytes in the buffer which will cause
|
||||||
|
// a write to BytesPipe to block when allocating a new slice.
|
||||||
|
const blockThreshold = 1e6
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrClosed is returned when Write is called on a closed BytesPipe.
|
||||||
|
ErrClosed = errors.New("write to closed BytesPipe")
|
||||||
|
|
||||||
|
bufPools = make(map[int]*sync.Pool)
|
||||||
|
bufPoolsLock sync.Mutex
|
||||||
|
)
|
||||||
|
|
||||||
|
// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
|
||||||
|
// All written data may be read at most once. Also, BytesPipe allocates
|
||||||
|
// and releases new byte slices to adjust to current needs, so the buffer
|
||||||
|
// won't be overgrown after peak loads.
|
||||||
|
type BytesPipe struct {
|
||||||
|
	mu       sync.Mutex
	wait     *sync.Cond
	buf      []*fixedBuffer
	bufLen   int
	closeErr error // error to return from next Read. set to nil if not closed.
}

// NewBytesPipe creates a new BytesPipe, initialized with a single
// fixed-size buffer of the minimum capacity taken from the buffer pool.
func NewBytesPipe() *BytesPipe {
	bp := &BytesPipe{}
	bp.buf = append(bp.buf, getBuffer(minCap))
	bp.wait = sync.NewCond(&bp.mu)
	return bp
}

// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
	bp.mu.Lock()

	written := 0
loop0:
	for {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return written, ErrClosed
		}

		if len(bp.buf) == 0 {
			bp.buf = append(bp.buf, getBuffer(64))
		}
		// get the last buffer
		b := bp.buf[len(bp.buf)-1]

		n, err := b.Write(p)
		written += n
		bp.bufLen += n

		// errBufferFull is an error we expect to get if the buffer is full
		if err != nil && err != errBufferFull {
			bp.wait.Broadcast()
			bp.mu.Unlock()
			return written, err
		}

		// if there was enough room to write all then break
		if len(p) == n {
			break
		}

		// more data: write to the next slice
		p = p[n:]

		// make sure the buffer doesn't grow too big from this write
		for bp.bufLen >= blockThreshold {
			bp.wait.Wait()
			if bp.closeErr != nil {
				continue loop0
			}
		}

		// add new byte slice to the buffers slice and continue writing
		nextCap := b.Cap() * 2
		if nextCap > maxCap {
			nextCap = maxCap
		}
		bp.buf = append(bp.buf, getBuffer(nextCap))
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return written, nil
}

// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
	bp.mu.Lock()
	if err != nil {
		bp.closeErr = err
	} else {
		bp.closeErr = io.EOF
	}
	bp.wait.Broadcast()
	bp.mu.Unlock()
	return nil
}

// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
	return bp.CloseWithError(nil)
}

// Read reads bytes from BytesPipe.
// Data can be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
	bp.mu.Lock()
	if bp.bufLen == 0 {
		if bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
		bp.wait.Wait()
		if bp.bufLen == 0 && bp.closeErr != nil {
			bp.mu.Unlock()
			return 0, bp.closeErr
		}
	}

	for bp.bufLen > 0 {
		b := bp.buf[0]
		read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
		n += read
		bp.bufLen -= read

		if b.Len() == 0 {
			// it's empty so return it to the pool and move to the next one
			returnBuffer(b)
			bp.buf[0] = nil
			bp.buf = bp.buf[1:]
		}

		if len(p) == read {
			break
		}

		p = p[read:]
	}

	bp.wait.Broadcast()
	bp.mu.Unlock()
	return
}

func returnBuffer(b *fixedBuffer) {
	b.Reset()
	bufPoolsLock.Lock()
	pool := bufPools[b.Cap()]
	bufPoolsLock.Unlock()
	if pool != nil {
		pool.Put(b)
	}
}

func getBuffer(size int) *fixedBuffer {
	bufPoolsLock.Lock()
	pool, ok := bufPools[size]
	if !ok {
		pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
		bufPools[size] = pool
	}
	bufPoolsLock.Unlock()
	return pool.Get().(*fixedBuffer)
}
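A minimal sketch of how the pipe above is typically consumed, assuming the vendored import path github.com/docker/docker/pkg/ioutils: one goroutine writes and closes, the other drains until the close error surfaces. Close with no argument records io.EOF, so readers see a normal end-of-stream rather than a failure.

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Writer side: send two chunks, then close with io.EOF.
	go func() {
		bp.Write([]byte("hello, "))
		bp.Write([]byte("world"))
		bp.Close()
	}()

	// Reader side: drain until the close error surfaces.
	out := make([]byte, 0, 16)
	buf := make([]byte, 4)
	for {
		n, err := bp.Read(buf)
		out = append(out, buf[:n]...)
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
	fmt.Println(string(out)) // hello, world
}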
@ -0,0 +1,22 @@
package ioutils

import (
	"fmt"
	"io"
)

// FprintfIfNotEmpty prints the string value if it's not empty
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
	if value != "" {
		return fmt.Fprintf(w, format, value)
	}
	return 0, nil
}

// FprintfIfTrue prints the boolean value if it's true
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
	if ok {
		return fmt.Fprintf(w, format, ok)
	}
	return 0, nil
}
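Both helpers are thin conditional wrappers around fmt.Fprintf; a quick illustrative sketch of their behavior:

package main

import (
	"os"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Prints "name=alpine\n"; the second call writes nothing at all.
	ioutils.FprintfIfNotEmpty(os.Stdout, "name=%s\n", "alpine")
	ioutils.FprintfIfNotEmpty(os.Stdout, "name=%s\n", "")
}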
@ -0,0 +1,75 @@
package ioutils

import (
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
)

// NewAtomicFileWriter returns a WriteCloser so that writing to it writes to a
// temporary file and closing it atomically changes the temporary file to
// destination path. Writing and closing concurrently is not allowed.
func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename))
	if err != nil {
		return nil, err
	}
	abspath, err := filepath.Abs(filename)
	if err != nil {
		return nil, err
	}
	return &atomicFileWriter{
		f:  f,
		fn: abspath,
	}, nil
}

// AtomicWriteFile atomically writes data to a file named by filename.
func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	f, err := NewAtomicFileWriter(filename, perm)
	if err != nil {
		return err
	}
	n, err := f.Write(data)
	if err == nil && n < len(data) {
		err = io.ErrShortWrite
	}
	if err1 := f.Close(); err == nil {
		err = err1
	}
	return err
}

type atomicFileWriter struct {
	f        *os.File
	fn       string
	writeErr error
}

func (w *atomicFileWriter) Write(dt []byte) (int, error) {
	n, err := w.f.Write(dt)
	if err != nil {
		w.writeErr = err
	}
	return n, err
}

func (w *atomicFileWriter) Close() (retErr error) {
	defer func() {
		if retErr != nil {
			os.Remove(w.f.Name())
		}
	}()
	if err := w.f.Sync(); err != nil {
		w.f.Close()
		return err
	}
	if err := w.f.Close(); err != nil {
		return err
	}
	if w.writeErr == nil {
		return os.Rename(w.f.Name(), w.fn)
	}
	return nil
}
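The write-then-rename protocol above guarantees readers never observe a partially written file. A usage sketch; the file name and contents are illustrative. Note that in this snapshot the perm argument is accepted but never applied to the temporary file, which keeps ioutil.TempFile's default mode:

package main

import (
	"io/ioutil"
	"log"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Concurrent readers of config.json see either the old contents or the
	// new contents, never a half-written file.
	if err := ioutils.AtomicWriteFile("config.json", []byte(`{"debug":true}`), 0644); err != nil {
		log.Fatal(err)
	}
	data, err := ioutil.ReadFile("config.json")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("%s", data)
}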
@ -0,0 +1,226 @@
package ioutils

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

type pos struct {
	idx    int
	offset int64
}

type multiReadSeeker struct {
	readers []io.ReadSeeker
	pos     *pos
	posIdx  map[io.ReadSeeker]int
}

func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
	var tmpOffset int64
	switch whence {
	case os.SEEK_SET:
		for i, rdr := range r.readers {
			// get size of the current reader
			s, err := rdr.Seek(0, os.SEEK_END)
			if err != nil {
				return -1, err
			}

			if offset > tmpOffset+s {
				if i == len(r.readers)-1 {
					rdrOffset := s + (offset - tmpOffset)
					if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
						return -1, err
					}
					r.pos = &pos{i, rdrOffset}
					return offset, nil
				}

				tmpOffset += s
				continue
			}

			rdrOffset := offset - tmpOffset
			idx := i

			rdr.Seek(rdrOffset, os.SEEK_SET)
			// make sure all following readers are at 0
			for _, rdr := range r.readers[i+1:] {
				rdr.Seek(0, os.SEEK_SET)
			}

			if rdrOffset == s && i != len(r.readers)-1 {
				idx++
				rdrOffset = 0
			}
			r.pos = &pos{idx, rdrOffset}
			return offset, nil
		}
	case os.SEEK_END:
		for _, rdr := range r.readers {
			s, err := rdr.Seek(0, os.SEEK_END)
			if err != nil {
				return -1, err
			}
			tmpOffset += s
		}
		r.Seek(tmpOffset+offset, os.SEEK_SET)
		return tmpOffset + offset, nil
	case os.SEEK_CUR:
		if r.pos == nil {
			return r.Seek(offset, os.SEEK_SET)
		}
		// Just return the current offset
		if offset == 0 {
			return r.getCurOffset()
		}

		curOffset, err := r.getCurOffset()
		if err != nil {
			return -1, err
		}
		rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
		if err != nil {
			return -1, err
		}

		r.pos = &pos{r.posIdx[rdr], rdrOffset}
		return curOffset + offset, nil
	default:
		return -1, fmt.Errorf("Invalid whence: %d", whence)
	}

	return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
}

func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
	var rdr io.ReadSeeker
	var rdrOffset int64

	// iterate by index so assignments target the rdr declared above rather
	// than a loop variable shadowing it
	for i := range r.readers {
		rdr = r.readers[i]
		offsetTo, err := r.getOffsetToReader(rdr)
		if err != nil {
			return nil, -1, err
		}
		if offsetTo > offset {
			rdr = r.readers[i-1]
			rdrOffset = offsetTo - offset
			break
		}

		if rdr == r.readers[len(r.readers)-1] {
			rdrOffset = offsetTo + offset
			break
		}
	}

	return rdr, rdrOffset, nil
}

func (r *multiReadSeeker) getCurOffset() (int64, error) {
	var totalSize int64
	for _, rdr := range r.readers[:r.pos.idx+1] {
		if r.posIdx[rdr] == r.pos.idx {
			totalSize += r.pos.offset
			break
		}

		size, err := getReadSeekerSize(rdr)
		if err != nil {
			return -1, fmt.Errorf("error getting seeker size: %v", err)
		}
		totalSize += size
	}
	return totalSize, nil
}

func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
	var offset int64
	for _, rs := range r.readers {
		if rs == rdr {
			break
		}

		// sum the sizes of the readers that precede rdr
		size, err := getReadSeekerSize(rs)
		if err != nil {
			return -1, err
		}
		offset += size
	}
	return offset, nil
}

func (r *multiReadSeeker) Read(b []byte) (int, error) {
	if r.pos == nil {
		r.pos = &pos{0, 0}
	}

	bCap := int64(cap(b))
	buf := bytes.NewBuffer(nil)
	var rdr io.ReadSeeker

	for _, rdr = range r.readers[r.pos.idx:] {
		readBytes, err := io.CopyN(buf, rdr, bCap)
		if err != nil && err != io.EOF {
			return -1, err
		}
		bCap -= readBytes

		if bCap == 0 {
			break
		}
	}

	rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}
	r.pos = &pos{r.posIdx[rdr], rdrPos}
	return buf.Read(b)
}

func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
	// save the current position
	pos, err := rdr.Seek(0, os.SEEK_CUR)
	if err != nil {
		return -1, err
	}

	// get the size
	size, err := rdr.Seek(0, os.SEEK_END)
	if err != nil {
		return -1, err
	}

	// reset the position
	if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
		return -1, err
	}
	return size, nil
}

// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
// input readseekers. After calling this method the initial position is set to the
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
// Seek can be used over the sum of lengths of all readseekers.
//
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
// its ReadSeeker components. Also, users should make no assumption on the state
// of individual readseekers while the MultiReadSeeker is used.
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
	if len(readers) == 1 {
		return readers[0]
	}
	idx := make(map[io.ReadSeeker]int)
	for i, rdr := range readers {
		idx[rdr] = i
	}
	return &multiReadSeeker{
		readers: readers,
		posIdx:  idx,
	}
}
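A usage sketch of MultiReadSeeker with in-memory readers (strings.Reader satisfies io.ReadSeeker); the concatenated stream reads back as a single one:

package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	// Concatenate three in-memory readers and read them back as one stream.
	rs := ioutils.MultiReadSeeker(
		strings.NewReader("alpha "),
		strings.NewReader("beta "),
		strings.NewReader("gamma"),
	)
	data, err := ioutil.ReadAll(rs)
	if err != nil {
		fmt.Println("read error:", err)
		return
	}
	fmt.Println(string(data)) // alpha beta gamma
}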
@ -0,0 +1,154 @@
package ioutils

import (
	"crypto/sha256"
	"encoding/hex"
	"io"

	"golang.org/x/net/context"
)

type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (r *readCloserWrapper) Close() error {
	return r.closer()
}

// NewReadCloserWrapper returns a new io.ReadCloser.
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
	return &readCloserWrapper{
		Reader: r,
		closer: closer,
	}
}

type readerErrWrapper struct {
	reader io.Reader
	closer func()
}

func (r *readerErrWrapper) Read(p []byte) (int, error) {
	n, err := r.reader.Read(p)
	if err != nil {
		r.closer()
	}
	return n, err
}

// NewReaderErrWrapper returns a new io.Reader.
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
	return &readerErrWrapper{
		reader: r,
		closer: closer,
	}
}

// HashData returns the sha256 sum of src.
func HashData(src io.Reader) (string, error) {
	h := sha256.New()
	if _, err := io.Copy(h, src); err != nil {
		return "", err
	}
	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
}

// OnEOFReader wraps an io.ReadCloser and a function;
// the function will run at the end of file or when the reader is closed.
type OnEOFReader struct {
	Rc io.ReadCloser
	Fn func()
}

func (r *OnEOFReader) Read(p []byte) (n int, err error) {
	n, err = r.Rc.Read(p)
	if err == io.EOF {
		r.runFunc()
	}
	return
}

// Close closes the file and runs the function.
func (r *OnEOFReader) Close() error {
	err := r.Rc.Close()
	r.runFunc()
	return err
}

func (r *OnEOFReader) runFunc() {
	if fn := r.Fn; fn != nil {
		fn()
		r.Fn = nil
	}
}

// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
// operations.
type cancelReadCloser struct {
	cancel func()
	pR     *io.PipeReader // Stream to read from
	pW     *io.PipeWriter
}

// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
// context is cancelled. The returned io.ReadCloser must be closed when it is
// no longer needed.
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
	pR, pW := io.Pipe()

	// Create a context used to signal when the pipe is closed
	doneCtx, cancel := context.WithCancel(context.Background())

	p := &cancelReadCloser{
		cancel: cancel,
		pR:     pR,
		pW:     pW,
	}

	go func() {
		_, err := io.Copy(pW, in)
		select {
		case <-ctx.Done():
			// If the context was closed, p.closeWithError
			// was already called. Calling it again would
			// change the error that Read returns.
		default:
			p.closeWithError(err)
		}
		in.Close()
	}()
	go func() {
		for {
			select {
			case <-ctx.Done():
				p.closeWithError(ctx.Err())
			case <-doneCtx.Done():
				return
			}
		}
	}()

	return p
}

// Read wraps the Read method of the pipe that provides data from the wrapped
// ReadCloser.
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
	return p.pR.Read(buf)
}

// closeWithError closes the wrapper and its underlying reader. It will
// cause future calls to Read to return err.
func (p *cancelReadCloser) closeWithError(err error) {
	p.pW.CloseWithError(err)
	p.cancel()
}

// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
	p.closeWithError(io.EOF)
	return nil
}
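A sketch of NewCancelReadCloser guarding a reader that never returns, standing in for a stalled network body. The slowReader type here is a hypothetical stand-in, not part of the package:

package main

import (
	"fmt"
	"io/ioutil"
	"time"

	"github.com/docker/docker/pkg/ioutils"
	"golang.org/x/net/context"
)

// slowReader never returns data, simulating a hung connection.
type slowReader struct{}

func (slowReader) Read(p []byte) (int, error) { time.Sleep(time.Hour); return 0, nil }
func (slowReader) Close() error               { return nil }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()

	rc := ioutils.NewCancelReadCloser(ctx, slowReader{})
	defer rc.Close()

	// The read unblocks when the context deadline fires.
	_, err := ioutil.ReadAll(rc)
	fmt.Println(err) // context deadline exceeded
}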
@ -0,0 +1,10 @@
// +build !windows

package ioutils

import "io/ioutil"

// TempDir on Unix systems is equivalent to ioutil.TempDir.
func TempDir(dir, prefix string) (string, error) {
	return ioutil.TempDir(dir, prefix)
}
@ -0,0 +1,18 @@
// +build windows

package ioutils

import (
	"io/ioutil"

	"github.com/docker/docker/pkg/longpath"
)

// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
func TempDir(dir, prefix string) (string, error) {
	tempDir, err := ioutil.TempDir(dir, prefix)
	if err != nil {
		return "", err
	}
	return longpath.AddPrefix(tempDir), nil
}
@ -0,0 +1,92 @@
package ioutils

import (
	"io"
	"sync"
)

// WriteFlusher wraps the Write and Flush operation ensuring that every write
// is a flush. In addition, the Close method can be called to intercept
// Read/Write calls if the target's lifecycle has already ended.
type WriteFlusher struct {
	w           io.Writer
	flusher     flusher
	flushed     chan struct{}
	flushedOnce sync.Once
	closed      chan struct{}
	closeLock   sync.Mutex
}

type flusher interface {
	Flush()
}

var errWriteFlusherClosed = io.EOF

func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
	select {
	case <-wf.closed:
		return 0, errWriteFlusherClosed
	default:
	}

	n, err = wf.w.Write(b)
	wf.Flush() // every write is a flush.
	return n, err
}

// Flush the stream immediately.
func (wf *WriteFlusher) Flush() {
	select {
	case <-wf.closed:
		return
	default:
	}

	wf.flushedOnce.Do(func() {
		close(wf.flushed)
	})
	wf.flusher.Flush()
}

// Flushed returns the state of flushed.
// If the stream has been flushed, it returns true; otherwise it returns false.
func (wf *WriteFlusher) Flushed() bool {
	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
	// be used to detect whether or not a response code has been issued.
	// Another hook should be used instead.
	var flushed bool
	select {
	case <-wf.flushed:
		flushed = true
	default:
	}
	return flushed
}

// Close closes the write flusher, disallowing any further writes to the
// target. After the flusher is closed, all calls to write or flush will
// result in an error.
func (wf *WriteFlusher) Close() error {
	wf.closeLock.Lock()
	defer wf.closeLock.Unlock()

	select {
	case <-wf.closed:
		return errWriteFlusherClosed
	default:
		close(wf.closed)
	}
	return nil
}

// NewWriteFlusher returns a new WriteFlusher.
func NewWriteFlusher(w io.Writer) *WriteFlusher {
	var fl flusher
	if f, ok := w.(flusher); ok {
		fl = f
	} else {
		fl = &NopFlusher{}
	}
	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
}
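The usual consumer of WriteFlusher is an HTTP handler streaming progress output: http.ResponseWriter exposes a Flush method, so the type assertion in NewWriteFlusher picks it up and every write reaches the client immediately. A sketch, with the route and payload purely illustrative:

package main

import (
	"log"
	"net/http"

	"github.com/docker/docker/pkg/ioutils"
)

func handler(w http.ResponseWriter, r *http.Request) {
	wf := ioutils.NewWriteFlusher(w)
	defer wf.Close()

	for i := 0; i < 3; i++ {
		if _, err := wf.Write([]byte("tick\n")); err != nil {
			return // client went away or the flusher was closed
		}
	}
}

func main() {
	http.HandleFunc("/stream", handler)
	log.Fatal(http.ListenAndServe(":8080", nil))
}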
@ -0,0 +1,66 @@
package ioutils

import "io"

// NopWriter represents a type whose Write operation is a no-op.
type NopWriter struct{}

func (*NopWriter) Write(buf []byte) (int, error) {
	return len(buf), nil
}

type nopWriteCloser struct {
	io.Writer
}

func (w *nopWriteCloser) Close() error { return nil }

// NopWriteCloser returns a nopWriteCloser.
func NopWriteCloser(w io.Writer) io.WriteCloser {
	return &nopWriteCloser{w}
}

// NopFlusher represents a type whose Flush operation is a no-op.
type NopFlusher struct{}

// Flush is a no-op operation.
func (f *NopFlusher) Flush() {}

type writeCloserWrapper struct {
	io.Writer
	closer func() error
}

func (r *writeCloserWrapper) Close() error {
	return r.closer()
}

// NewWriteCloserWrapper returns a new io.WriteCloser.
func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
	return &writeCloserWrapper{
		Writer: r,
		closer: closer,
	}
}

// WriteCounter wraps a concrete io.Writer and holds a count of the number
// of bytes written to the writer during a "session".
// This can be convenient when the write return is masked
// (e.g., json.Encoder.Encode()).
type WriteCounter struct {
	Count  int64
	Writer io.Writer
}

// NewWriteCounter returns a new WriteCounter.
func NewWriteCounter(w io.Writer) *WriteCounter {
	return &WriteCounter{
		Writer: w,
	}
}

func (wc *WriteCounter) Write(p []byte) (count int, err error) {
	count, err = wc.Writer.Write(p)
	wc.Count += int64(count)
	return
}
@ -0,0 +1,26 @@
// longpath introduces some constants and helper functions for handling long paths
// in Windows, which are expected to be prepended with `\\?\` and followed by either
// a drive letter, a UNC server\share, or a volume identifier.

package longpath

import (
	"strings"
)

// Prefix is the longpath prefix for Windows file paths.
const Prefix = `\\?\`

// AddPrefix will add the Windows long path prefix to the path provided if
// it does not already have it.
func AddPrefix(path string) string {
	if !strings.HasPrefix(path, Prefix) {
		if strings.HasPrefix(path, `\\`) {
			// This is a UNC path, so we need to add 'UNC' to the path as well.
			path = Prefix + `UNC` + path[1:]
		} else {
			path = Prefix + path
		}
	}
	return path
}
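The prefix logic is easiest to see by example; these inputs and outputs follow directly from AddPrefix above:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/longpath"
)

func main() {
	fmt.Println(longpath.AddPrefix(`C:\Temp\docker`)) // \\?\C:\Temp\docker
	fmt.Println(longpath.AddPrefix(`\\server\share`)) // \\?\UNC\server\share
	fmt.Println(longpath.AddPrefix(`\\?\C:\already`)) // unchanged
}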
@ -0,0 +1,119 @@
// Package pools provides a collection of pools which provide various
// data types with buffers. These can be used to lower the number of
// memory allocations and reuse buffers.
//
// New pools should be added to this package to allow them to be
// shared across packages.
//
// Utility functions which operate on pools should be added to this
// package to allow them to be reused.
package pools

import (
	"bufio"
	"io"
	"sync"

	"github.com/docker/docker/pkg/ioutils"
)

var (
	// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
	BufioReader32KPool *BufioReaderPool
	// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
	BufioWriter32KPool *BufioWriterPool
)

const buffer32K = 32 * 1024

// BufioReaderPool is a bufio reader that uses sync.Pool.
type BufioReaderPool struct {
	pool sync.Pool
}

func init() {
	BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
	BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
}

// newBufioReaderPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioReaderPoolWithSize(size int) *BufioReaderPool {
	pool := sync.Pool{
		New: func() interface{} { return bufio.NewReaderSize(nil, size) },
	}
	return &BufioReaderPool{pool: pool}
}

// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool.
func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader {
	buf := bufPool.pool.Get().(*bufio.Reader)
	buf.Reset(r)
	return buf
}

// Put puts the bufio.Reader back into the pool.
func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
	buf := BufioReader32KPool.Get(src)
	written, err = io.Copy(dst, buf)
	BufioReader32KPool.Put(buf)
	return
}

// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back
// into the pool and closes the reader if it's an io.ReadCloser.
func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser {
	return ioutils.NewReadCloserWrapper(r, func() error {
		if readCloser, ok := r.(io.ReadCloser); ok {
			readCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}

// BufioWriterPool is a bufio writer that uses sync.Pool.
type BufioWriterPool struct {
	pool sync.Pool
}

// newBufioWriterPoolWithSize is unexported because new pools should be
// added here to be shared where required.
func newBufioWriterPoolWithSize(size int) *BufioWriterPool {
	pool := sync.Pool{
		New: func() interface{} { return bufio.NewWriterSize(nil, size) },
	}
	return &BufioWriterPool{pool: pool}
}

// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool.
func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer {
	buf := bufPool.pool.Get().(*bufio.Writer)
	buf.Reset(w)
	return buf
}

// Put puts the bufio.Writer back into the pool.
func (bufPool *BufioWriterPool) Put(b *bufio.Writer) {
	b.Reset(nil)
	bufPool.pool.Put(b)
}

// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back
// into the pool and closes the writer if it's an io.WriteCloser.
func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser {
	return ioutils.NewWriteCloserWrapper(w, func() error {
		buf.Flush()
		if writeCloser, ok := w.(io.WriteCloser); ok {
			writeCloser.Close()
		}
		bufPool.Put(buf)
		return nil
	})
}
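A sketch of the pooled copy helper; repeated copies share the 32K bufio.Reader instead of allocating a fresh buffer each time:

package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader(strings.Repeat("x", 1<<20))
	var dst bytes.Buffer

	// pools.Copy borrows a pooled 32K bufio.Reader for the duration of the
	// copy and returns it to the pool when done.
	n, err := pools.Copy(&dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 1048576
}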
@ -0,0 +1,11 @@
package promise

// Go is a basic promise implementation: it wraps a function call in a
// goroutine and returns a channel which will later return the function's
// return value.
func Go(f func() error) chan error {
	ch := make(chan error, 1)
	go func() {
		ch <- f()
	}()
	return ch
}
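A usage sketch: run work in the background and join on the error later. Note the channel is buffered with capacity 1, so the goroutine can deliver its result and exit even if the caller never receives.

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/promise"
)

func main() {
	errC := promise.Go(func() error {
		time.Sleep(100 * time.Millisecond) // some background work
		return nil
	})

	// Do other work, then join on the result.
	if err := <-errC; err != nil {
		fmt.Println("background task failed:", err)
	}
}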
@ -0,0 +1,185 @@
package stdcopy

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"

	"github.com/Sirupsen/logrus"
)

// StdType is the type of standard stream
// a writer can multiplex to.
type StdType byte

const (
	// Stdin represents standard input stream type.
	Stdin StdType = iota
	// Stdout represents standard output stream type.
	Stdout
	// Stderr represents standard error stream type.
	Stderr

	stdWriterPrefixLen = 8
	stdWriterFdIndex   = 0
	stdWriterSizeIndex = 4

	startingBufLen = 32*1024 + stdWriterPrefixLen + 1
)

var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }}

// stdWriter is a wrapper of io.Writer with extra customized info.
type stdWriter struct {
	io.Writer
	prefix byte
}

// Write sends the buffer to the underlying writer.
// It inserts the prefix header before the buffer,
// so stdcopy.StdCopy knows where to multiplex the output.
// It makes stdWriter implement io.Writer.
func (w *stdWriter) Write(p []byte) (n int, err error) {
	if w == nil || w.Writer == nil {
		return 0, errors.New("Writer not instantiated")
	}
	if p == nil {
		return 0, nil
	}

	header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix}
	binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p)))
	buf := bufPool.Get().(*bytes.Buffer)
	buf.Write(header[:])
	buf.Write(p)

	n, err = w.Writer.Write(buf.Bytes())
	n -= stdWriterPrefixLen
	if n < 0 {
		n = 0
	}

	buf.Reset()
	bufPool.Put(buf)
	return
}

// NewStdWriter instantiates a new Writer.
// Everything written to it will be encapsulated using a custom format,
// and written to the underlying `w` stream.
// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection.
// `t` indicates the id of the stream to encapsulate.
// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr.
func NewStdWriter(w io.Writer, t StdType) io.Writer {
	return &stdWriter{
		Writer: w,
		prefix: byte(t),
	}
}

// StdCopy is a modified version of io.Copy.
//
// StdCopy will demultiplex `src`, assuming that it contains two streams,
// previously multiplexed together using a StdWriter instance.
// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`.
//
// StdCopy will read until it hits EOF on `src`. It will then return a nil error.
// In other words: if `err` is non nil, it indicates a real underlying error.
//
// `written` will hold the total number of bytes written to `dstout` and `dsterr`.
func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) {
	var (
		buf       = make([]byte, startingBufLen)
		bufLen    = len(buf)
		nr, nw    int
		er, ew    error
		out       io.Writer
		frameSize int
	)

	for {
		// Make sure we have at least a full header
		for nr < stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				if nr < stdWriterPrefixLen {
					logrus.Debugf("Corrupted prefix: %v", buf[:nr])
					return written, nil
				}
				break
			}
			if er != nil {
				logrus.Debugf("Error reading header: %s", er)
				return 0, er
			}
		}

		// Check the first byte to know where to write
		switch StdType(buf[stdWriterFdIndex]) {
		case Stdin:
			fallthrough
		case Stdout:
			// Write on stdout
			out = dstout
		case Stderr:
			// Write on stderr
			out = dsterr
		default:
			logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex])
			return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex])
		}

		// Retrieve the size of the frame
		frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4]))
		logrus.Debugf("framesize: %d", frameSize)

		// Check if the buffer is big enough to read the frame.
		// Extend it if necessary.
		if frameSize+stdWriterPrefixLen > bufLen {
			logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf))
			buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...)
			bufLen = len(buf)
		}

		// While the amount of bytes read is less than the size of the frame + header, we keep reading
		for nr < frameSize+stdWriterPrefixLen {
			var nr2 int
			nr2, er = src.Read(buf[nr:])
			nr += nr2
			if er == io.EOF {
				if nr < frameSize+stdWriterPrefixLen {
					logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr])
					return written, nil
				}
				break
			}
			if er != nil {
				logrus.Debugf("Error reading frame: %s", er)
				return 0, er
			}
		}

		// Write the retrieved frame (without header)
		nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen])
		if ew != nil {
			logrus.Debugf("Error writing frame: %s", ew)
			return 0, ew
		}
		// If the frame has not been fully written: error
		if nw != frameSize {
			logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize)
			return 0, io.ErrShortWrite
		}
		written += int64(nw)

		// Move the rest of the buffer to the beginning
		copy(buf, buf[frameSize+stdWriterPrefixLen:])
		// Move the index
		nr -= frameSize + stdWriterPrefixLen
	}
}
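The writer and copier are designed as a round trip: frames written through NewStdWriter into one stream are split back out by StdCopy. A self-contained sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	// Mux stdout and stderr frames into one buffer, then demux them.
	var muxed bytes.Buffer
	stdout := stdcopy.NewStdWriter(&muxed, stdcopy.Stdout)
	stderr := stdcopy.NewStdWriter(&muxed, stdcopy.Stderr)
	stdout.Write([]byte("out line\n"))
	stderr.Write([]byte("err line\n"))

	var outBuf, errBuf bytes.Buffer
	if _, err := stdcopy.StdCopy(&outBuf, &errBuf, &muxed); err != nil {
		panic(err)
	}
	fmt.Print(outBuf.String()) // out line
	fmt.Print(errBuf.String()) // err line
}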
@ -0,0 +1,52 @@
package system

import (
	"os"
	"syscall"
	"time"
	"unsafe"
)

var (
	maxTime time.Time
)

func init() {
	if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
		// This is a 64 bit timespec
		// os.Chtimes limits time to the following
		maxTime = time.Unix(0, 1<<63-1)
	} else {
		// This is a 32 bit timespec
		maxTime = time.Unix(1<<31-1, 0)
	}
}

// Chtimes changes the access time and modified time of a file at the given path
func Chtimes(name string, atime time.Time, mtime time.Time) error {
	unixMinTime := time.Unix(0, 0)
	unixMaxTime := maxTime

	// If the modified time is prior to the Unix Epoch, or after the
	// end of Unix Time, os.Chtimes has undefined behavior;
	// default to Unix Epoch in this case, just in case

	if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
		atime = unixMinTime
	}

	if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
		mtime = unixMinTime
	}

	if err := os.Chtimes(name, atime, mtime); err != nil {
		return err
	}

	// Take platform specific action for setting create time.
	if err := setCTime(name, mtime); err != nil {
		return err
	}

	return nil
}
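A brief sketch of calling Chtimes; the file name is illustrative and assumed to exist. Out-of-range values are clamped inside Chtimes, so even a zero time.Time is safe to pass:

package main

import (
	"log"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	atime := time.Now()
	mtime := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
	// "some-file" is a placeholder path; Chtimes returns os.Chtimes errors
	// unchanged if the file does not exist.
	if err := system.Chtimes("some-file", atime, mtime); err != nil {
		log.Fatal(err)
	}
}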
@ -0,0 +1,14 @@
// +build !windows

package system

import (
	"time"
)

//setCTime will set the create time on a file. On Unix, the create
//time is updated as a side effect of setting the modified time, so
//no action is required.
func setCTime(path string, ctime time.Time) error {
	return nil
}
@ -0,0 +1,27 @@
// +build windows

package system

import (
	"syscall"
	"time"
)

//setCTime will set the create time on a file. On Windows, this requires
//calling SetFileTime and explicitly including the create time.
func setCTime(path string, ctime time.Time) error {
	ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
	pathp, e := syscall.UTF16PtrFromString(path)
	if e != nil {
		return e
	}
	h, e := syscall.CreateFile(pathp,
		syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
		syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
	if e != nil {
		return e
	}
	defer syscall.Close(h)
	c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
	return syscall.SetFileTime(h, &c, nil, nil)
}
@ -0,0 +1,10 @@
package system

import (
	"errors"
)

var (
	// ErrNotSupportedPlatform means the platform is not supported.
	ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
)
@ -0,0 +1,83 @@
package system

// This file implements syscalls for Win32 events which are not implemented
// in golang.

import (
	"syscall"
	"unsafe"
)

var (
	procCreateEvent = modkernel32.NewProc("CreateEventW")
	procOpenEvent   = modkernel32.NewProc("OpenEventW")
	procSetEvent    = modkernel32.NewProc("SetEvent")
	procResetEvent  = modkernel32.NewProc("ResetEvent")
	procPulseEvent  = modkernel32.NewProc("PulseEvent")
)

// CreateEvent implements win32 CreateEventW func in golang. It will create an event object.
func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
	namep, _ := syscall.UTF16PtrFromString(name)
	var _p1 uint32
	if manualReset {
		_p1 = 1
	}
	var _p2 uint32
	if initialState {
		_p2 = 1
	}
	r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
	use(unsafe.Pointer(namep))
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
		err = e1
	}
	return
}

// OpenEvent implements win32 OpenEventW func in golang. It opens an event object.
func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
	namep, _ := syscall.UTF16PtrFromString(name)
	var _p1 uint32
	if inheritHandle {
		_p1 = 1
	}
	r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
	use(unsafe.Pointer(namep))
	handle = syscall.Handle(r0)
	if handle == syscall.InvalidHandle {
		err = e1
	}
	return
}

// SetEvent implements win32 SetEvent func in golang.
func SetEvent(handle syscall.Handle) (err error) {
	return setResetPulse(handle, procSetEvent)
}

// ResetEvent implements win32 ResetEvent func in golang.
func ResetEvent(handle syscall.Handle) (err error) {
	return setResetPulse(handle, procResetEvent)
}

// PulseEvent implements win32 PulseEvent func in golang.
func PulseEvent(handle syscall.Handle) (err error) {
	return setResetPulse(handle, procPulseEvent)
}

func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
	r0, _, e1 := proc.Call(uintptr(handle))
	if r0 == 0 {
		// these Win32 APIs return a nonzero value on success, so a zero
		// return indicates failure
		err = e1
	}
	return
}

var temp unsafe.Pointer

// use ensures a variable is kept alive without the GC freeing while still needed
func use(p unsafe.Pointer) {
	temp = p
}
@ -0,0 +1,19 @@
// +build !windows

package system

import (
	"os"
	"path/filepath"
)

// MkdirAll creates a directory named path along with any necessary parents,
// with permission perm for all directories created.
func MkdirAll(path string, perm os.FileMode) error {
	return os.MkdirAll(path, perm)
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
	return filepath.IsAbs(path)
}
@ -0,0 +1,82 @@
// +build windows

package system

import (
	"os"
	"path/filepath"
	"regexp"
	"strings"
	"syscall"
)

// MkdirAll implementation that is volume path aware for Windows.
func MkdirAll(path string, perm os.FileMode) error {
	if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
		return nil
	}

	// The rest of this method is copied from os.MkdirAll and should be kept
	// as-is to ensure compatibility.

	// Fast path: if we can tell whether path is a directory or file, stop with success or error.
	dir, err := os.Stat(path)
	if err == nil {
		if dir.IsDir() {
			return nil
		}
		return &os.PathError{
			Op:   "mkdir",
			Path: path,
			Err:  syscall.ENOTDIR,
		}
	}

	// Slow path: make sure parent exists and then call Mkdir for path.
	i := len(path)
	for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
		i--
	}

	j := i
	for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
		j--
	}

	if j > 1 {
		// Create parent
		err = MkdirAll(path[0:j-1], perm)
		if err != nil {
			return err
		}
	}

	// Parent now exists; invoke Mkdir and use its result.
	err = os.Mkdir(path, perm)
	if err != nil {
		// Handle arguments like "foo/." by
		// double-checking that directory doesn't exist.
		dir, err1 := os.Lstat(path)
		if err1 == nil && dir.IsDir() {
			return nil
		}
		return err
	}
	return nil
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
	if !filepath.IsAbs(path) {
		if !strings.HasPrefix(path, string(os.PathSeparator)) {
			return false
		}
	}
	return true
}
@ -0,0 +1,19 @@
// +build !windows

package system

import (
	"syscall"
)

// Lstat takes a path to a file and returns
// a system.StatT type pertaining to that file.
//
// Throws an error if the file does not exist
func Lstat(path string) (*StatT, error) {
	s := &syscall.Stat_t{}
	if err := syscall.Lstat(path, s); err != nil {
		return nil, err
	}
	return fromStatT(s)
}
@ -0,0 +1,25 @@
// +build windows

package system

import (
	"os"
)

// Lstat calls os.Lstat to get a fileinfo interface back.
// This is then copied into our own locally defined structure.
// Note the Linux version uses fromStatT to do the copy back,
// but that is not strictly necessary when already in an OS specific module.
func Lstat(path string) (*StatT, error) {
	fi, err := os.Lstat(path)
	if err != nil {
		return nil, err
	}

	return &StatT{
		name:    fi.Name(),
		size:    fi.Size(),
		mode:    fi.Mode(),
		modTime: fi.ModTime(),
		isDir:   fi.IsDir()}, nil
}
@ -0,0 +1,17 @@
package system

// MemInfo contains memory statistics of the host system.
type MemInfo struct {
	// Total usable RAM (i.e. physical RAM minus a few reserved bits and the
	// kernel binary code).
	MemTotal int64

	// Amount of free memory.
	MemFree int64

	// Total amount of swap space available.
	SwapTotal int64

	// Amount of swap space that is currently unused.
	SwapFree int64
}
@ -0,0 +1,65 @@
package system

import (
	"bufio"
	"io"
	"os"
	"strconv"
	"strings"

	"github.com/docker/go-units"
)

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
	file, err := os.Open("/proc/meminfo")
	if err != nil {
		return nil, err
	}
	defer file.Close()
	return parseMemInfo(file)
}

// parseMemInfo parses the /proc/meminfo file into
// a MemInfo object given an io.Reader to the file.
// It returns an error if there are problems reading from the file.
func parseMemInfo(reader io.Reader) (*MemInfo, error) {
	meminfo := &MemInfo{}
	scanner := bufio.NewScanner(reader)
	for scanner.Scan() {
		// Expected format: ["MemTotal:", "1234", "kB"]
		parts := strings.Fields(scanner.Text())

		// Sanity checks: Skip malformed entries.
		if len(parts) < 3 || parts[2] != "kB" {
			continue
		}

		// Convert to bytes.
		size, err := strconv.Atoi(parts[1])
		if err != nil {
			continue
		}
		bytes := int64(size) * units.KiB

		switch parts[0] {
		case "MemTotal:":
			meminfo.MemTotal = bytes
		case "MemFree:":
			meminfo.MemFree = bytes
		case "SwapTotal:":
			meminfo.SwapTotal = bytes
		case "SwapFree:":
			meminfo.SwapFree = bytes
		}
	}

	// Handle errors that may have occurred during the reading of the file.
	if err := scanner.Err(); err != nil {
		return nil, err
	}

	return meminfo, nil
}
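parseMemInfo is unexported, so callers exercise it through ReadMemInfo; a minimal sketch (on unsupported platforms the stub variant returns ErrNotSupportedPlatform instead):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	meminfo, err := system.ReadMemInfo()
	if err != nil {
		fmt.Println("not supported on this platform:", err)
		return
	}
	fmt.Printf("total=%d free=%d\n", meminfo.MemTotal, meminfo.MemFree)
}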
@ -0,0 +1,128 @@
// +build solaris,cgo

package system

import (
	"fmt"
	"unsafe"
)

// #cgo LDFLAGS: -lkstat
// #include <unistd.h>
// #include <stdlib.h>
// #include <stdio.h>
// #include <kstat.h>
// #include <sys/swap.h>
// #include <sys/param.h>
// struct swaptable *allocSwaptable(int num) {
//	struct swaptable *st;
//	struct swapent *swapent;
//	st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
//	swapent = st->swt_ent;
//	for (int i = 0; i < num; i++,swapent++) {
//		swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
//	}
//	st->swt_n = num;
//	return st;
//}
// void freeSwaptable (struct swaptable *st) {
//	struct swapent *swapent = st->swt_ent;
//	for (int i = 0; i < st->swt_n; i++,swapent++) {
//		free(swapent->ste_path);
//	}
//	free(st);
// }
// swapent_t getSwapEnt(swapent_t *ent, int i) {
//	return ent[i];
// }
// int64_t getPpKernel() {
//	int64_t pp_kernel = 0;
//	kstat_ctl_t *ksc;
//	kstat_t *ks;
//	kstat_named_t *knp;
//	kid_t kid;
//
//	if ((ksc = kstat_open()) == NULL) {
//		return -1;
//	}
//	if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
//		return -1;
//	}
//	if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
//	    ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
//		return -1;
//	}
//	switch (knp->data_type) {
//	case KSTAT_DATA_UINT64:
//		pp_kernel = knp->value.ui64;
//		break;
//	case KSTAT_DATA_UINT32:
//		pp_kernel = knp->value.ui32;
//		break;
//	}
//	pp_kernel *= sysconf(_SC_PAGESIZE);
//	return (pp_kernel > 0 ? pp_kernel : -1);
// }
import "C"

// Get the system memory info using sysconf same as prtconf
func getTotalMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_PHYS_PAGES)
	return int64(pagesize * npages)
}

func getFreeMem() int64 {
	pagesize := C.sysconf(C._SC_PAGESIZE)
	npages := C.sysconf(C._SC_AVPHYS_PAGES)
	return int64(pagesize * npages)
}

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {

	ppKernel := C.getPpKernel()
	MemTotal := getTotalMem()
	MemFree := getFreeMem()
	SwapTotal, SwapFree, err := getSysSwap()

	if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
		SwapFree < 0 {
		return nil, fmt.Errorf("Error getting system memory info %v\n", err)
	}

	meminfo := &MemInfo{}
	// Total memory is total physical memory less the memory locked by the kernel
	meminfo.MemTotal = MemTotal - int64(ppKernel)
	meminfo.MemFree = MemFree
	meminfo.SwapTotal = SwapTotal
	meminfo.SwapFree = SwapFree

	return meminfo, nil
}

func getSysSwap() (int64, int64, error) {
	var tSwap int64
	var fSwap int64
	var diskblksPerPage int64
	num, err := C.swapctl(C.SC_GETNSWP, nil)
	if err != nil {
		return -1, -1, err
	}
	st := C.allocSwaptable(num)
	_, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
	if err != nil {
		C.freeSwaptable(st)
		return -1, -1, err
	}

	diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
	for i := 0; i < int(num); i++ {
		swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
		tSwap += int64(swapent.ste_pages) * diskblksPerPage
		fSwap += int64(swapent.ste_free) * diskblksPerPage
	}
	C.freeSwaptable(st)
	return tSwap, fSwap, nil
}
@ -0,0 +1,8 @@
// +build !linux,!windows,!solaris

package system

// ReadMemInfo is not supported on platforms other than linux, windows and solaris.
func ReadMemInfo() (*MemInfo, error) {
	return nil, ErrNotSupportedPlatform
}
@ -0,0 +1,44 @@
package system

import (
	"syscall"
	"unsafe"
)

var (
	modkernel32 = syscall.NewLazyDLL("kernel32.dll")

	procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx")
)

// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx
type memorystatusex struct {
	dwLength                uint32
	dwMemoryLoad            uint32
	ullTotalPhys            uint64
	ullAvailPhys            uint64
	ullTotalPageFile        uint64
	ullAvailPageFile        uint64
	ullTotalVirtual         uint64
	ullAvailVirtual         uint64
	ullAvailExtendedVirtual uint64
}

// ReadMemInfo retrieves memory statistics of the host system and returns a
// MemInfo type.
func ReadMemInfo() (*MemInfo, error) {
	msi := &memorystatusex{
		dwLength: 64,
	}
	r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi)))
	if r1 == 0 {
		return &MemInfo{}, nil
	}
	return &MemInfo{
		MemTotal:  int64(msi.ullTotalPhys),
		MemFree:   int64(msi.ullAvailPhys),
		SwapTotal: int64(msi.ullTotalPageFile),
		SwapFree:  int64(msi.ullAvailPageFile),
	}, nil
}
Some files were not shown because too many files have changed in this diff.