From a0b70b0ec79939c2a6901d9e6972ef0cc0eac80d Mon Sep 17 00:00:00 2001 From: James Bardin Date: Thu, 19 Jan 2017 14:10:52 -0500 Subject: [PATCH 01/12] Sync the vendor folder with the manifest A number of PRs have come through which modified the vendor folder without recording the proper information. This resets everything back to the recorded version. --- .../CenturyLinkCloud/clc-sdk/.coveralls.yml | 1 - .../CenturyLinkCloud/clc-sdk/.gitignore | 3 - .../CenturyLinkCloud/clc-sdk/.travis.yml | 8 - .../CenturyLinkCloud/clc-sdk/cover.sh | 0 vendor/github.com/Unknwon/com/.gitignore | 24 - vendor/github.com/Unknwon/com/.travis.yml | 13 - vendor/github.com/Unknwon/macaron/.gitignore | 2 - .../apparentlymart/go-grafana-api/.gitignore | 24 - .../go-rundeck-api/rundeck/job.go | 129 +- .../go-rundeck-api/rundeck/job_test.go | 314 ----- vendor/github.com/armon/circbuf/.gitignore | 22 - vendor/github.com/armon/circbuf/LICENSE | 0 vendor/github.com/armon/go-radix/.gitignore | 22 - vendor/github.com/armon/go-radix/.travis.yml | 3 - .../aws-sdk-go/private/endpoints/endpoints.go | 70 ++ .../private/endpoints/endpoints.json | 82 ++ .../private/endpoints/endpoints_map.go | 95 ++ .../github.com/bgentry/speakeasy/.gitignore | 2 - vendor/github.com/cenkalti/backoff/.gitignore | 22 - .../github.com/cenkalti/backoff/.travis.yml | 2 - .../cihub/seelog/archive/archive_test.go | 178 --- .../cihub/seelog/archive/tar/tar_test.go | 104 -- .../cihub/seelog/archive/zip/zip_test.go | 99 -- .../davecgh/go-spew/spew/bypasssafe.go | 37 - vendor/github.com/fatih/structs/.gitignore | 23 - vendor/github.com/fatih/structs/.travis.yml | 11 - .../fsouza/go-dockerclient/.gitignore | 2 - .../fsouza/go-dockerclient/.travis.yml | 27 - .../github.com/fsouza/go-dockerclient/LICENSE | 2 +- .../golang.org/x/net/context/context.go | 2 +- .../external/golang.org/x/sys/unix/mkall.sh | 0 .../golang.org/x/sys/unix/mkerrors.sh | 0 .../golang.org/x/sys/unix/mksyscall.pl | 0 .../x/sys/unix/mksyscall_solaris.pl | 0 .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_darwin.pl | 0 .../x/sys/unix/mksysnum_dragonfly.pl | 0 .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_linux.pl | 0 .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 0 .../external/golang.org/x/sys/unix/syscall.go | 2 +- vendor/github.com/go-ini/ini/.gitignore | 4 - .../golang/protobuf/proto/pointer_reflect.go | 479 -------- vendor/github.com/gosimple/slug/.gitignore | 2 - .../github.com/hashicorp/go-plugin/.gitignore | 1 - .../hashicorp/go-rootcerts/.travis.yml | 12 - .../hashicorp/go-version/.travis.yml | 11 - .../github.com/hashicorp/logutils/.gitignore | 22 - .../nomad/client/allocdir/alloc_dir.go | 399 ++++++ .../nomad/client/allocdir/alloc_dir_darwin.go | 26 + .../client/allocdir/alloc_dir_freebsd.go | 26 + .../nomad/client/allocdir/alloc_dir_linux.go | 79 ++ .../nomad/client/allocdir/alloc_dir_unix.go | 81 ++ .../client/allocdir/alloc_dir_windows.go | 45 + .../hashicorp/nomad/client/config/config.go | 221 ++++ .../hashicorp/nomad/client/driver/docker.go | 1085 +++++++++++++++++ .../nomad/client/driver/docker_default.go | 14 + .../nomad/client/driver/docker_windows.go | 13 + .../hashicorp/nomad/client/driver/driver.go | 192 +++ .../hashicorp/nomad/client/driver/env/env.go | 430 +++++++ .../hashicorp/nomad/client/driver/exec.go | 319 +++++ .../nomad/client/driver/exec_default.go | 12 + .../nomad/client/driver/exec_linux.go | 34 + 
.../nomad/client/driver/executor/checks.go | 206 ++++ .../client/driver/executor/checks_unix.go | 18 + .../client/driver/executor/checks_windows.go | 8 + .../nomad/client/driver/executor/executor.go | 856 +++++++++++++ .../client/driver/executor/executor_basic.go | 46 + .../client/driver/executor/executor_linux.go | 373 ++++++ .../client/driver/executor/executor_unix.go | 50 + .../driver/executor/executor_windows.go | 5 + .../executor/resource_container_default.go | 24 + .../executor/resource_container_linux.go | 42 + .../nomad/client/driver/executor_plugin.go | 181 +++ .../hashicorp/nomad/client/driver/java.go | 416 +++++++ .../driver/logging/collector_windows.go | 71 ++ .../nomad/client/driver/logging/rotator.go | 285 +++++ .../driver/logging/syslog_parser_unix.go | 158 +++ .../driver/logging/syslog_server_unix.go | 86 ++ .../driver/logging/syslog_server_windows.go | 10 + .../logging/universal_collector_unix.go | 207 ++++ .../hashicorp/nomad/client/driver/plugins.go | 51 + .../hashicorp/nomad/client/driver/qemu.go | 412 +++++++ .../hashicorp/nomad/client/driver/raw_exec.go | 307 +++++ .../hashicorp/nomad/client/driver/rkt.go | 436 +++++++ .../nomad/client/driver/structs/structs.go | 77 ++ .../client/driver/structs/structs_default.go | 12 + .../client/driver/structs/structs_linux.go | 10 + .../nomad/client/driver/syslog_plugin.go | 69 ++ .../hashicorp/nomad/client/driver/utils.go | 170 +++ .../nomad/client/driver/utils_unix.go | 18 + .../nomad/client/driver/utils_windows.go | 9 + .../nomad/client/fingerprint/arch.go | 26 + .../nomad/client/fingerprint/cgroup.go | 59 + .../nomad/client/fingerprint/cgroup_linux.go | 57 + .../nomad/client/fingerprint/consul.go | 100 ++ .../hashicorp/nomad/client/fingerprint/cpu.go | 52 + .../nomad/client/fingerprint/env_aws.go | 250 ++++ .../nomad/client/fingerprint/env_gce.go | 270 ++++ .../nomad/client/fingerprint/fingerprint.go | 87 ++ .../client/fingerprint/fingerprint_default.go | 6 + .../client/fingerprint/fingerprint_linux.go | 5 + .../nomad/client/fingerprint/host.go | 51 + .../nomad/client/fingerprint/memory.go | 43 + .../nomad/client/fingerprint/network.go | 167 +++ .../client/fingerprint/network_default.go | 8 + .../nomad/client/fingerprint/network_linux.go | 78 ++ .../client/fingerprint/network_windows.go | 52 + .../nomad/client/fingerprint/nomad.go | 26 + .../nomad/client/fingerprint/storage.go | 59 + .../nomad/client/fingerprint/storage_unix.go | 64 + .../client/fingerprint/storage_windows.go | 33 + .../client/fingerprint/zstorage_windows.go | 26 + .../hashicorp/nomad/client/stats/cpu.go | 62 + .../hashicorp/nomad/client/stats/host.go | 187 +++ .../hashicorp/nomad/client/structs/structs.go | 97 ++ .../nomad/command/agent/consul/check.go | 84 ++ .../nomad/command/agent/consul/syncer.go | 983 +++++++++++++++ .../nomad/helper/discover/discover.go | 60 + .../hashicorp/nomad/helper/fields/data.go | 169 +++ .../hashicorp/nomad/helper/fields/schema.go | 19 + .../hashicorp/nomad/helper/fields/type.go | 47 + .../hashicorp/nomad/helper/stats/cpu.go | 67 + .../hashicorp/nomad/jobspec/parse.go | 17 + .../nomad/nomad/structs/config/README.md | 7 + .../nomad/nomad/structs/config/consul.go | 190 +++ .../hashicorp/nomad/nomad/types/types.go | 3 + vendor/github.com/hashicorp/yamux/.gitignore | 23 - .../hmrc/vmware-govcd/.editorconfig | 26 - .../github.com/hmrc/vmware-govcd/.gitignore | 25 - .../github.com/hmrc/vmware-govcd/.travis.yml | 16 - vendor/github.com/imdario/mergo/.travis.yml | 2 - .../influxdata/influxdb/client/influxdb.go | 2 +- 
.../influxdata/influxdb/models/points.go | 2 +- .../influxdata/influxdb/pkg/escape/bytes.go | 2 +- .../jmespath/go-jmespath/.gitignore | 4 - .../jmespath/go-jmespath/.travis.yml | 9 - vendor/github.com/joyent/gocommon/.gitignore | 26 - .../jtopjian/cobblerclient/.gitignore | 1 - vendor/github.com/kardianos/osext/osext.go | 2 +- vendor/github.com/lib/pq/README.md | 105 ++ .../macaron-contrib/session/.gitignore | 2 - .../masterzen/simplexml/dom/document.go | 2 +- .../masterzen/simplexml/dom/element.go | 2 +- .../mattn/go-isatty/isatty_appengine.go | 9 - .../mitchellh/colorstring/.travis.yml | 15 - vendor/github.com/mitchellh/go-ps/LICENSE.md | 21 + vendor/github.com/mitchellh/go-ps/README.md | 34 + vendor/github.com/mitchellh/go-ps/Vagrantfile | 43 + vendor/github.com/mitchellh/go-ps/process.go | 40 + .../mitchellh/go-ps/process_darwin.go | 138 +++ .../mitchellh/go-ps/process_freebsd.go | 260 ++++ .../mitchellh/go-ps/process_unix.go | 129 ++ .../mitchellh/go-ps/process_windows.go | 119 ++ .../mitchellh/mapstructure/.travis.yml | 7 - vendor/github.com/nu7hatch/gouuid/.gitignore | 11 - .../github.com/pearkes/dnsimple/.travis.yml | 5 - vendor/github.com/pearkes/mailgun/.gitignore | 23 - vendor/github.com/pkg/errors/.gitignore | 24 - vendor/github.com/pkg/errors/.travis.yml | 10 - .../rackspace/gophercloud/CONTRIBUTING.md | 275 +++++ .../rackspace/gophercloud/CONTRIBUTORS.md | 13 + .../github.com/rackspace/gophercloud/LICENSE | 191 +++ .../rackspace/gophercloud/README.md | 160 +++ .../rackspace/gophercloud/UPGRADING.md | 338 +++++ .../rackspace/gophercloud/auth_options.go | 55 + .../rackspace/gophercloud/auth_results.go | 14 + .../github.com/rackspace/gophercloud/doc.go | 67 + .../rackspace/gophercloud/endpoint_search.go | 92 ++ .../gophercloud/openstack/auth_env.go | 61 + .../openstack/blockstorage/v1/volumes/doc.go | 5 + .../blockstorage/v1/volumes/requests.go | 236 ++++ .../blockstorage/v1/volumes/results.go | 113 ++ .../blockstorage/v1/volumes/testing/doc.go | 7 + .../v1/volumes/testing/fixtures.go | 115 ++ .../openstack/blockstorage/v1/volumes/urls.go | 23 + .../openstack/blockstorage/v1/volumes/util.go | 22 + .../rackspace/gophercloud/openstack/client.go | 346 ++++++ .../v2/extensions/volumeattach/testing/doc.go | 7 + .../volumeattach/testing/fixtures.go | 110 ++ .../openstack/endpoint_location.go | 91 ++ .../networking/v2/common/common_tests.go | 14 + .../rackspace/gophercloud/params.go | 271 ++++ .../rackspace/gophercloud/provider_client.go | 331 +++++ .../rackspace/gophercloud/results.go | 153 +++ .../rackspace/gophercloud/service_client.go | 32 + .../github.com/rackspace/gophercloud/util.go | 82 ++ .../github.com/rainycape/unidecode/.gitignore | 23 - .../github.com/ryanuber/columnize/.travis.yml | 3 - vendor/github.com/satori/go.uuid/.travis.yml | 11 - .../scaleway/scaleway-cli/pkg/api/helpers.go | 671 ++++++++++ .../github.com/sean-/postgresql-acl/README.md | 97 ++ .../github.com/soniah/dnsmadeeasy/.gitignore | 28 - .../github.com/soniah/dnsmadeeasy/.travis.yml | 13 - .../github.com/tent/http-link-go/.gitignore | 1 - .../github.com/tent/http-link-go/.travis.yml | 6 - vendor/github.com/vmware/govmomi/.drone.sec | 1 - vendor/github.com/vmware/govmomi/.drone.yml | 17 - vendor/github.com/vmware/govmomi/.gitignore | 1 - vendor/github.com/vmware/govmomi/.travis.yml | 12 - vendor/github.com/xanzy/ssh-agent/.gitignore | 24 - .../x/crypto/curve25519/curve25519.go | 841 ------------- vendor/golang.org/x/crypto/curve25519/doc.go | 2 +- .../golang.org/x/crypto/ssh/agent/client.go | 2 +- 
vendor/golang.org/x/crypto/ssh/doc.go | 2 +- vendor/golang.org/x/net/context/context.go | 2 +- .../x/net/context/ctxhttp/ctxhttp.go | 2 +- vendor/golang.org/x/oauth2/.travis.yml | 14 - .../golang.org/x/oauth2/client_appengine.go | 25 - .../x/oauth2/google/appengine_hook.go | 13 - vendor/golang.org/x/oauth2/google/google.go | 2 +- vendor/golang.org/x/oauth2/jws/jws.go | 2 +- vendor/golang.org/x/oauth2/oauth2.go | 2 +- vendor/golang.org/x/sys/unix/.gitignore | 1 - vendor/golang.org/x/sys/unix/mkall.sh | 0 vendor/golang.org/x/sys/unix/mkerrors.sh | 0 vendor/golang.org/x/sys/unix/mksyscall.pl | 0 .../x/sys/unix/mksyscall_solaris.pl | 0 .../golang.org/x/sys/unix/mksysctl_openbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_darwin.pl | 0 .../x/sys/unix/mksysnum_dragonfly.pl | 0 .../golang.org/x/sys/unix/mksysnum_freebsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_linux.pl | 0 .../golang.org/x/sys/unix/mksysnum_netbsd.pl | 0 .../golang.org/x/sys/unix/mksysnum_openbsd.pl | 0 vendor/golang.org/x/sys/unix/syscall.go | 2 +- .../google.golang.org/appengine/.travis.yml | 14 - .../google.golang.org/appengine/appengine.go | 2 +- .../appengine/internal/api_classic.go | 98 -- .../appengine/internal/identity_classic.go | 23 - .../appengine/internal/regen.sh | 0 .../cloud/compute/metadata/metadata.go | 2 +- vendor/gopkg.in/ini.v1/.gitignore | 4 - vendor/vendor.json | 9 +- 235 files changed, 16280 insertions(+), 3011 deletions(-) delete mode 100644 vendor/github.com/CenturyLinkCloud/clc-sdk/.coveralls.yml delete mode 100644 vendor/github.com/CenturyLinkCloud/clc-sdk/.gitignore delete mode 100644 vendor/github.com/CenturyLinkCloud/clc-sdk/.travis.yml mode change 100644 => 100755 vendor/github.com/CenturyLinkCloud/clc-sdk/cover.sh delete mode 100644 vendor/github.com/Unknwon/com/.gitignore delete mode 100644 vendor/github.com/Unknwon/com/.travis.yml delete mode 100644 vendor/github.com/Unknwon/macaron/.gitignore delete mode 100644 vendor/github.com/apparentlymart/go-grafana-api/.gitignore delete mode 100644 vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go delete mode 100644 vendor/github.com/armon/circbuf/.gitignore mode change 100644 => 100755 vendor/github.com/armon/circbuf/LICENSE delete mode 100644 vendor/github.com/armon/go-radix/.gitignore delete mode 100644 vendor/github.com/armon/go-radix/.travis.yml create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json create mode 100644 vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go delete mode 100644 vendor/github.com/bgentry/speakeasy/.gitignore delete mode 100644 vendor/github.com/cenkalti/backoff/.gitignore delete mode 100644 vendor/github.com/cenkalti/backoff/.travis.yml delete mode 100644 vendor/github.com/cihub/seelog/archive/archive_test.go delete mode 100644 vendor/github.com/cihub/seelog/archive/tar/tar_test.go delete mode 100644 vendor/github.com/cihub/seelog/archive/zip/zip_test.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 vendor/github.com/fatih/structs/.gitignore delete mode 100644 vendor/github.com/fatih/structs/.travis.yml delete mode 100644 vendor/github.com/fsouza/go-dockerclient/.gitignore delete mode 100644 vendor/github.com/fsouza/go-dockerclient/.travis.yml mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh mode change 100644 => 100755 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl mode change 100644 => 100755 vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl delete mode 100644 vendor/github.com/go-ini/ini/.gitignore delete mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go delete mode 100644 vendor/github.com/gosimple/slug/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-plugin/.gitignore delete mode 100644 vendor/github.com/hashicorp/go-rootcerts/.travis.yml delete mode 100644 vendor/github.com/hashicorp/go-version/.travis.yml delete mode 100644 vendor/github.com/hashicorp/logutils/.gitignore create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_darwin.go create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_freebsd.go create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/config/config.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/docker.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/docker_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/docker_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/driver.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/env/env.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/exec.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/exec_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/exec_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/checks.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/checks_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/checks_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/executor.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/executor_basic.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/executor_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/executor_unix.go create mode 100644 
vendor/github.com/hashicorp/nomad/client/driver/executor/executor_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/executor_plugin.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/java.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/collector_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/rotator.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_parser_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/logging/universal_collector_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/plugins.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/qemu.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/raw_exec.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/rkt.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/structs/structs.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/structs/structs_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/structs/structs_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/syslog_plugin.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/utils.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/utils_unix.go create mode 100644 vendor/github.com/hashicorp/nomad/client/driver/utils_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/arch.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/consul.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/cpu.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/env_aws.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/env_gce.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/host.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/memory.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/network.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/network_default.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/network_linux.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/network_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/nomad.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/storage.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/storage_unix.go 
create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/storage_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/fingerprint/zstorage_windows.go create mode 100644 vendor/github.com/hashicorp/nomad/client/stats/cpu.go create mode 100644 vendor/github.com/hashicorp/nomad/client/stats/host.go create mode 100644 vendor/github.com/hashicorp/nomad/client/structs/structs.go create mode 100644 vendor/github.com/hashicorp/nomad/command/agent/consul/check.go create mode 100644 vendor/github.com/hashicorp/nomad/command/agent/consul/syncer.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/discover/discover.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/fields/data.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/fields/schema.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/fields/type.go create mode 100644 vendor/github.com/hashicorp/nomad/helper/stats/cpu.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/config/README.md create mode 100644 vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go create mode 100644 vendor/github.com/hashicorp/nomad/nomad/types/types.go delete mode 100644 vendor/github.com/hashicorp/yamux/.gitignore delete mode 100644 vendor/github.com/hmrc/vmware-govcd/.editorconfig delete mode 100644 vendor/github.com/hmrc/vmware-govcd/.gitignore delete mode 100644 vendor/github.com/hmrc/vmware-govcd/.travis.yml delete mode 100644 vendor/github.com/imdario/mergo/.travis.yml delete mode 100644 vendor/github.com/jmespath/go-jmespath/.gitignore delete mode 100644 vendor/github.com/jmespath/go-jmespath/.travis.yml delete mode 100644 vendor/github.com/joyent/gocommon/.gitignore delete mode 100644 vendor/github.com/jtopjian/cobblerclient/.gitignore create mode 100644 vendor/github.com/lib/pq/README.md delete mode 100644 vendor/github.com/macaron-contrib/session/.gitignore delete mode 100644 vendor/github.com/mattn/go-isatty/isatty_appengine.go delete mode 100644 vendor/github.com/mitchellh/colorstring/.travis.yml create mode 100644 vendor/github.com/mitchellh/go-ps/LICENSE.md create mode 100644 vendor/github.com/mitchellh/go-ps/README.md create mode 100644 vendor/github.com/mitchellh/go-ps/Vagrantfile create mode 100644 vendor/github.com/mitchellh/go-ps/process.go create mode 100644 vendor/github.com/mitchellh/go-ps/process_darwin.go create mode 100644 vendor/github.com/mitchellh/go-ps/process_freebsd.go create mode 100644 vendor/github.com/mitchellh/go-ps/process_unix.go create mode 100644 vendor/github.com/mitchellh/go-ps/process_windows.go delete mode 100644 vendor/github.com/mitchellh/mapstructure/.travis.yml delete mode 100644 vendor/github.com/nu7hatch/gouuid/.gitignore delete mode 100644 vendor/github.com/pearkes/dnsimple/.travis.yml delete mode 100644 vendor/github.com/pearkes/mailgun/.gitignore delete mode 100644 vendor/github.com/pkg/errors/.gitignore delete mode 100644 vendor/github.com/pkg/errors/.travis.yml create mode 100644 vendor/github.com/rackspace/gophercloud/CONTRIBUTING.md create mode 100644 vendor/github.com/rackspace/gophercloud/CONTRIBUTORS.md create mode 100644 vendor/github.com/rackspace/gophercloud/LICENSE create mode 100644 vendor/github.com/rackspace/gophercloud/README.md create mode 100644 vendor/github.com/rackspace/gophercloud/UPGRADING.md create mode 100644 vendor/github.com/rackspace/gophercloud/auth_options.go create mode 100644 vendor/github.com/rackspace/gophercloud/auth_results.go create mode 100644 
vendor/github.com/rackspace/gophercloud/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/endpoint_search.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/auth_env.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/requests.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/urls.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes/util.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/client.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/endpoint_location.go create mode 100644 vendor/github.com/rackspace/gophercloud/openstack/networking/v2/common/common_tests.go create mode 100644 vendor/github.com/rackspace/gophercloud/params.go create mode 100644 vendor/github.com/rackspace/gophercloud/provider_client.go create mode 100644 vendor/github.com/rackspace/gophercloud/results.go create mode 100644 vendor/github.com/rackspace/gophercloud/service_client.go create mode 100644 vendor/github.com/rackspace/gophercloud/util.go delete mode 100644 vendor/github.com/rainycape/unidecode/.gitignore delete mode 100644 vendor/github.com/ryanuber/columnize/.travis.yml delete mode 100644 vendor/github.com/satori/go.uuid/.travis.yml create mode 100644 vendor/github.com/scaleway/scaleway-cli/pkg/api/helpers.go create mode 100644 vendor/github.com/sean-/postgresql-acl/README.md delete mode 100644 vendor/github.com/soniah/dnsmadeeasy/.gitignore delete mode 100644 vendor/github.com/soniah/dnsmadeeasy/.travis.yml delete mode 100644 vendor/github.com/tent/http-link-go/.gitignore delete mode 100644 vendor/github.com/tent/http-link-go/.travis.yml delete mode 100644 vendor/github.com/vmware/govmomi/.drone.sec delete mode 100644 vendor/github.com/vmware/govmomi/.drone.yml delete mode 100644 vendor/github.com/vmware/govmomi/.gitignore delete mode 100644 vendor/github.com/vmware/govmomi/.travis.yml delete mode 100644 vendor/github.com/xanzy/ssh-agent/.gitignore delete mode 100644 vendor/golang.org/x/crypto/curve25519/curve25519.go delete mode 100644 vendor/golang.org/x/oauth2/.travis.yml delete mode 100644 vendor/golang.org/x/oauth2/client_appengine.go delete mode 100644 vendor/golang.org/x/oauth2/google/appengine_hook.go delete mode 100644 vendor/golang.org/x/sys/unix/.gitignore mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mkall.sh mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mkerrors.sh mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksyscall.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksyscall_solaris.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysnum_darwin.pl mode change 100644 => 100755 
vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysnum_linux.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl mode change 100644 => 100755 vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl delete mode 100644 vendor/google.golang.org/appengine/.travis.yml delete mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go delete mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go mode change 100644 => 100755 vendor/google.golang.org/appengine/internal/regen.sh delete mode 100644 vendor/gopkg.in/ini.v1/.gitignore diff --git a/vendor/github.com/CenturyLinkCloud/clc-sdk/.coveralls.yml b/vendor/github.com/CenturyLinkCloud/clc-sdk/.coveralls.yml deleted file mode 100644 index 91600595a..000000000 --- a/vendor/github.com/CenturyLinkCloud/clc-sdk/.coveralls.yml +++ /dev/null @@ -1 +0,0 @@ -service_name: travis-ci diff --git a/vendor/github.com/CenturyLinkCloud/clc-sdk/.gitignore b/vendor/github.com/CenturyLinkCloud/clc-sdk/.gitignore deleted file mode 100644 index 0ecc34548..000000000 --- a/vendor/github.com/CenturyLinkCloud/clc-sdk/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*.swp -*.cov -Godeps/_workspace diff --git a/vendor/github.com/CenturyLinkCloud/clc-sdk/.travis.yml b/vendor/github.com/CenturyLinkCloud/clc-sdk/.travis.yml deleted file mode 100644 index 09431f95a..000000000 --- a/vendor/github.com/CenturyLinkCloud/clc-sdk/.travis.yml +++ /dev/null @@ -1,8 +0,0 @@ -language: go - -go: - - 1.5 - -install: make deps -script: - - make test diff --git a/vendor/github.com/CenturyLinkCloud/clc-sdk/cover.sh b/vendor/github.com/CenturyLinkCloud/clc-sdk/cover.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/Unknwon/com/.gitignore b/vendor/github.com/Unknwon/com/.gitignore deleted file mode 100644 index 0da157fe9..000000000 --- a/vendor/github.com/Unknwon/com/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.idea - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.iml diff --git a/vendor/github.com/Unknwon/com/.travis.yml b/vendor/github.com/Unknwon/com/.travis.yml deleted file mode 100644 index 882eb2d12..000000000 --- a/vendor/github.com/Unknwon/com/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go - -go: - - 1.2 - - 1.3 - - 1.4 - - tip - -install: go get -v -t - -notifications: - email: - - u@gogs.io \ No newline at end of file diff --git a/vendor/github.com/Unknwon/macaron/.gitignore b/vendor/github.com/Unknwon/macaron/.gitignore deleted file mode 100644 index 57ee3ec50..000000000 --- a/vendor/github.com/Unknwon/macaron/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -macaron.sublime-project -macaron.sublime-workspace \ No newline at end of file diff --git a/vendor/github.com/apparentlymart/go-grafana-api/.gitignore b/vendor/github.com/apparentlymart/go-grafana-api/.gitignore deleted file mode 100644 index daf913b1b..000000000 --- a/vendor/github.com/apparentlymart/go-grafana-api/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - 
-_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go index 275db8501..ca04ac35a 100644 --- a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go +++ b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job.go @@ -30,57 +30,12 @@ type JobDetail struct { GroupName string `xml:"group,omitempty"` ProjectName string `xml:"context>project,omitempty"` OptionsConfig *JobOptions `xml:"context>options,omitempty"` - Description string `xml:"description"` + Description string `xml:"description,omitempty"` LogLevel string `xml:"loglevel,omitempty"` - AllowConcurrentExecutions bool `xml:"multipleExecutions,omitempty"` - Dispatch *JobDispatch `xml:"dispatch,omitempty"` + AllowConcurrentExecutions bool `xml:"multipleExecutions"` + Dispatch *JobDispatch `xml:"dispatch"` CommandSequence *JobCommandSequence `xml:"sequence,omitempty"` - Timeout string `xml:"timeout,omitempty"` - Retry string `xml:"retry,omitempty"` NodeFilter *JobNodeFilter `xml:"nodefilters,omitempty"` - - /* If Dispatch is enabled, nodesSelectedByDefault is always present with true/false. - * by this reason omitempty cannot be present. - * This has to be handle by the user. - */ - NodesSelectedByDefault bool `xml:"nodesSelectedByDefault"` - Schedule *JobSchedule `xml:"schedule,omitempty"` -} - -type JobSchedule struct { - XMLName xml.Name `xml:"schedule"` - DayOfMonth *JobScheduleDayOfMonth `xml:"dayofmonth,omitempty"` - Time JobScheduleTime `xml:"time"` - Month JobScheduleMonth `xml:"month"` - WeekDay *JobScheduleWeekDay `xml:"weekday,omitempty"` - Year JobScheduleYear `xml:"year"` -} - -type JobScheduleDayOfMonth struct { - XMLName xml.Name `xml:"dayofmonth"` -} - -type JobScheduleMonth struct { - XMLName xml.Name `xml:"month"` - Day string `xml:"day,attr,omitempty"` - Month string `xml:"month,attr"` -} - -type JobScheduleYear struct { - XMLName xml.Name `xml:"year"` - Year string `xml:"year,attr"` -} - -type JobScheduleWeekDay struct { - XMLName xml.Name `xml:"weekday"` - Day string `xml:"day,attr"` -} - -type JobScheduleTime struct { - XMLName xml.Name `xml:"time"` - Hour string `xml:"hour,attr"` - Minute string `xml:"minute,attr"` - Seconds string `xml:"seconds,attr"` } type jobDetailList struct { @@ -98,40 +53,13 @@ type JobOptions struct { type JobOption struct { XMLName xml.Name `xml:"option"` - // If AllowsMultipleChoices is set, the string that will be used to delimit the multiple - // chosen options. - MultiValueDelimiter string `xml:"delimiter,attr,omitempty"` - - // If set, Rundeck will reject values that are not in the set of predefined choices. - RequirePredefinedChoice bool `xml:"enforcedvalues,attr,omitempty"` - - // When either ValueChoices or ValueChoicesURL is set, controls whether more than one - // choice may be selected as the value. - AllowsMultipleValues bool `xml:"multivalued,attr,omitempty"` - // The name of the option, which can be used to interpolate its value // into job commands. Name string `xml:"name,attr,omitempty"` - // Regular expression to be used to validate the option value. - ValidationRegex string `xml:"regex,attr,omitempty"` - - // If set, Rundeck requires a value to be set for this option. - IsRequired bool `xml:"required,attr,omitempty"` - - // If set, the input for this field will be obscured in the UI. Useful for passwords - // and other secrets. 
- ObscureInput bool `xml:"secure,attr,omitempty"` - - // If ObscureInput is set, StoragePath can be used to point out credentials. - StoragePath string `xml:"storagePath,attr,omitempty"` - // The default value of the option. DefaultValue string `xml:"value,attr,omitempty"` - // If set, the value can be accessed from scripts. - ValueIsExposedToScripts bool `xml:"valueExposed,attr,omitempty"` - // A sequence of predefined choices for this option. Mutually exclusive with ValueChoicesURL. ValueChoices JobValueChoices `xml:"values,attr"` @@ -139,10 +67,33 @@ type JobOption struct { // Mutually exclusive with ValueChoices ValueChoicesURL string `xml:"valuesUrl,attr,omitempty"` + // If set, Rundeck will reject values that are not in the set of predefined choices. + RequirePredefinedChoice bool `xml:"enforcedvalues,attr,omitempty"` + + // Regular expression to be used to validate the option value. + ValidationRegex string `xml:"regex,attr,omitempty"` + // Description of the value to be shown in the Rundeck UI. Description string `xml:"description,omitempty"` -} + // If set, Rundeck requires a value to be set for this option. + IsRequired bool `xml:"required,attr,omitempty"` + + // When either ValueChoices or ValueChoicesURL is set, controls whether more than one + // choice may be selected as the value. + AllowsMultipleValues bool `xml:"multivalued,attr,omitempty"` + + // If AllowsMultipleChoices is set, the string that will be used to delimit the multiple + // chosen options. + MultiValueDelimiter string `xml:"delimeter,attr,omitempty"` + + // If set, the input for this field will be obscured in the UI. Useful for passwords + // and other secrets. + ObscureInput bool `xml:"secure,attr,omitempty"` + + // If set, the value can be accessed from scripts. + ValueIsExposedToScripts bool `xml:"valueExposed,attr,omitempty"` +} // JobValueChoices is a specialization of []string representing a sequence of predefined values // for a job option. @@ -161,9 +112,6 @@ type JobCommandSequence struct { // Sequence of commands to run in the sequence. Commands []JobCommand `xml:"command"` - - // Description - Description string `xml:"description,omitempty"` } // JobCommand describes a particular command to run within the sequence of commands on a job. @@ -172,21 +120,9 @@ type JobCommandSequence struct { type JobCommand struct { XMLName xml.Name - // If the Workflow keepgoing is false, this allows the Workflow to continue when the Error Handler is successful. - ContinueOnError bool `xml:"keepgoingOnSuccess,attr,omitempty"` - - // Description - Description string `xml:"description,omitempty"` - - // On error: - ErrorHandler *JobCommand `xml:"errorhandler,omitempty"` - // A literal shell command to run. ShellCommand string `xml:"exec,omitempty"` - // Add extension to the temporary filename. - FileExtension string `xml:"fileExtension,omitempty"` - // An inline program to run. This will be written to disk and executed, so if it is // a shell script it should have an appropriate #! line. Script string `xml:"script,omitempty"` @@ -197,9 +133,6 @@ type JobCommand struct { // When ScriptFile is set, the arguments to provide to the script when executing it. ScriptFileArgs string `xml:"scriptargs,omitempty"` - // ScriptInterpreter is used to execute (Script)File with. - ScriptInterpreter *JobCommandScriptInterpreter `xml:"scriptinterpreter,omitempty"` - // A reference to another job to run as this command. 
Job *JobCommandJobRef `xml:"jobref"` @@ -210,20 +143,12 @@ type JobCommand struct { NodeStepPlugin *JobPlugin `xml:"node-step-plugin"` } -// (Inline) Script interpreter -type JobCommandScriptInterpreter struct { - XMLName xml.Name `xml:"scriptinterpreter"` - InvocationString string `xml:",chardata"` - ArgsQuoted bool `xml:"argsquoted,attr,omitempty"` -} - // JobCommandJobRef is a reference to another job that will run as one of the commands of a job. type JobCommandJobRef struct { XMLName xml.Name `xml:"jobref"` Name string `xml:"name,attr"` GroupName string `xml:"group,attr"` RunForEachNode bool `xml:"nodeStep,attr"` - NodeFilter *JobNodeFilter `xml:"nodefilters,omitempty"` Arguments JobCommandJobRefArguments `xml:"arg"` } diff --git a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go b/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go deleted file mode 100644 index aa54351b3..000000000 --- a/vendor/github.com/apparentlymart/go-rundeck-api/rundeck/job_test.go +++ /dev/null @@ -1,314 +0,0 @@ -package rundeck - -import ( - "fmt" - "testing" -) - -func TestUnmarshalJobDetail(t *testing.T) { - testUnmarshalXML(t, []unmarshalTest{ - unmarshalTest{ - "with-config", - `bazascending`, - &JobDetail{}, - func (rv interface {}) error { - v := rv.(*JobDetail) - if v.ID != "baz" { - return fmt.Errorf("got ID %s, but expecting baz", v.ID) - } - if v.Dispatch.RankOrder != "ascending" { - return fmt.Errorf("Dispatch.RankOrder = \"%v\", but expecting \"ascending\"", v.Dispatch.RankOrder) - } - return nil - }, - }, - unmarshalTest{ - "with-empty-config", - ``, - &JobPlugin{}, - func (rv interface {}) error { - v := rv.(*JobPlugin) - if v.Type != "foo-plugin" { - return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type) - } - if len(v.Config) != 0 { - return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config)) - } - return nil - }, - }, - }) -} - -func TestMarshalJobPlugin(t *testing.T) { - testMarshalXML(t, []marshalTest{ - marshalTest{ - "with-config", - JobPlugin{ - Type: "foo-plugin", - Config: map[string]string{ - "woo": "foo", - "bar": "baz", - }, - }, - ``, - }, - marshalTest{ - "with-empty-config", - JobPlugin{ - Type: "foo-plugin", - Config: map[string]string{}, - }, - ``, - }, - marshalTest{ - "with-zero-value-config", - JobPlugin{ - Type: "foo-plugin", - }, - ``, - }, - }) -} - -func TestUnmarshalJobPlugin(t *testing.T) { - testUnmarshalXML(t, []unmarshalTest{ - unmarshalTest{ - "with-config", - ``, - &JobPlugin{}, - func (rv interface {}) error { - v := rv.(*JobPlugin) - if v.Type != "foo-plugin" { - return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type) - } - if len(v.Config) != 2 { - return fmt.Errorf("got %v Config values, but expecting 2", len(v.Config)) - } - if v.Config["woo"] != "foo" { - return fmt.Errorf("Config[\"woo\"] = \"%s\", but expecting \"foo\"", v.Config["woo"]) - } - if v.Config["bar"] != "baz" { - return fmt.Errorf("Config[\"bar\"] = \"%s\", but expecting \"baz\"", v.Config["bar"]) - } - return nil - }, - }, - unmarshalTest{ - "with-empty-config", - ``, - &JobPlugin{}, - func (rv interface {}) error { - v := rv.(*JobPlugin) - if v.Type != "foo-plugin" { - return fmt.Errorf("got Type %s, but expecting foo-plugin", v.Type) - } - if len(v.Config) != 0 { - return fmt.Errorf("got %i Config values, but expecting 0", len(v.Config)) - } - return nil - }, - }, - }) -} - -func TestMarshalJobCommand(t *testing.T) { - testMarshalXML(t, []marshalTest{ - marshalTest{ - "with-shell", - JobCommand{ - ShellCommand: 
"command", - }, - `command`, - }, - marshalTest{ - "with-script", - JobCommand{ - Script: "script", - }, - ``, - }, - marshalTest{ - "with-script-interpreter", - JobCommand{ - FileExtension: "sh", - Script: "Hello World!", - ScriptInterpreter: &JobCommandScriptInterpreter{ - InvocationString: "sudo", - }, - }, - `shsudo`, - }, - }) -} - -func TestUnmarshalJobCommand(t *testing.T) { - testUnmarshalXML(t, []unmarshalTest{ - unmarshalTest{ - "with-shell", - `command`, - &JobCommand{}, - func (rv interface {}) error { - v := rv.(*JobCommand) - if v.ShellCommand != "command" { - return fmt.Errorf("got ShellCommand %s, but expecting command", v.ShellCommand) - } - return nil - }, - }, - unmarshalTest{ - "with-script", - ``, - &JobCommand{}, - func (rv interface {}) error { - v := rv.(*JobCommand) - if v.Script != "script" { - return fmt.Errorf("got Script %s, but expecting script", v.Script) - } - return nil - }, - }, - unmarshalTest{ - "with-script-interpreter", - `shsudo`, - &JobCommand{}, - func (rv interface {}) error { - v := rv.(*JobCommand) - if v.FileExtension != "sh" { - return fmt.Errorf("got FileExtension %s, but expecting sh", v.FileExtension) - } - if v.Script != "Hello World!" { - return fmt.Errorf("got Script %s, but expecting Hello World!", v.Script) - } - if v.ScriptInterpreter == nil { - return fmt.Errorf("got %s, but expecting not nil", v.ScriptInterpreter) - } - if v.ScriptInterpreter.InvocationString != "sudo" { - return fmt.Errorf("got InvocationString %s, but expecting sudo", v.ScriptInterpreter.InvocationString) - } - return nil - }, - }, - }) -} - -func TestMarshalScriptInterpreter(t *testing.T) { - testMarshalXML(t, []marshalTest{ - marshalTest{ - "with-script-interpreter", - JobCommandScriptInterpreter{ - InvocationString: "sudo", - }, - `sudo`, - }, - marshalTest{ - "with-script-interpreter-quoted", - JobCommandScriptInterpreter{ - ArgsQuoted: true, - InvocationString: "sudo", - }, - `sudo`, - }, - }) -} - -func TestUnmarshalScriptInterpreter(t *testing.T) { - testUnmarshalXML(t, []unmarshalTest{ - unmarshalTest{ - "with-script-interpreter", - `sudo`, - &JobCommandScriptInterpreter{}, - func (rv interface {}) error { - v := rv.(*JobCommandScriptInterpreter) - if v.InvocationString != "sudo" { - return fmt.Errorf("got InvocationString %s, but expecting sudo", v.InvocationString) - } - if v.ArgsQuoted { - return fmt.Errorf("got ArgsQuoted %s, but expecting false", v.ArgsQuoted) - } - return nil - }, - }, - unmarshalTest{ - "with-script-interpreter-quoted", - `sudo`, - &JobCommandScriptInterpreter{}, - func (rv interface {}) error { - v := rv.(*JobCommandScriptInterpreter) - if v.InvocationString != "sudo" { - return fmt.Errorf("got InvocationString %s, but expecting sudo", v.InvocationString) - } - if ! 
v.ArgsQuoted { - return fmt.Errorf("got ArgsQuoted %s, but expecting true", v.ArgsQuoted) - } - return nil - }, - }, - }) -} - -func TestMarshalErrorHanlder(t *testing.T) { - testMarshalXML(t, []marshalTest{ - marshalTest{ - "with-errorhandler", - JobCommandSequence{ - ContinueOnError: true, - OrderingStrategy: "step-first", - Commands: []JobCommand{ - JobCommand{ - Script: "inline_script", - ErrorHandler: &JobCommand{ - ContinueOnError: true, - Script: "error_script", - }, - }, - }, - }, - ``, - }, - }) -} - - -func TestMarshalJobOption(t *testing.T) { - testMarshalXML(t, []marshalTest{ - marshalTest{ - "with-option-basic", - JobOption{ - Name: "basic", - }, - ``, - }, - marshalTest{ - "with-option-multivalued", - JobOption{ - Name: "Multivalued", - MultiValueDelimiter: "|", - RequirePredefinedChoice: true, - AllowsMultipleValues: true, - IsRequired: true, - ValueChoices: JobValueChoices([]string{"myValues"}), - }, - ``, - }, - marshalTest{ - "with-all-attributes", - JobOption{ - Name: "advanced", - MultiValueDelimiter: "|", - RequirePredefinedChoice: true, - AllowsMultipleValues: true, - ValidationRegex: ".+", - IsRequired: true, - ObscureInput: true, - StoragePath: "myKey", - DefaultValue: "myValue", - ValueIsExposedToScripts: true, - ValueChoices: JobValueChoices([]string{"myValues"}), - ValueChoicesURL: "myValuesUrl", - }, - ``, - }, - }) -} - diff --git a/vendor/github.com/armon/circbuf/.gitignore b/vendor/github.com/armon/circbuf/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/armon/circbuf/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/armon/circbuf/LICENSE b/vendor/github.com/armon/circbuf/LICENSE old mode 100644 new mode 100755 diff --git a/vendor/github.com/armon/go-radix/.gitignore b/vendor/github.com/armon/go-radix/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/armon/go-radix/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/armon/go-radix/.travis.yml b/vendor/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6c..000000000 --- a/vendor/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go new file mode 100644 index 000000000..19d97562f --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go @@ -0,0 +1,70 @@ +// Package endpoints validates regional endpoints for services. +package endpoints + +//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go +//go:generate gofmt -s -w endpoints_map.go + +import ( + "fmt" + "regexp" + "strings" +) + +// NormalizeEndpoint takes and endpoint and service API information to return a +// normalized endpoint and signing region. 
If the endpoint is not an empty string +// the service name and region will be used to look up the service's API endpoint. +// If the endpoint is provided the scheme will be added if it is not present. +func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL, useDualStack bool) (normEndpoint, signingRegion string) { + if endpoint == "" { + return EndpointForRegion(serviceName, region, disableSSL, useDualStack) + } + + return AddScheme(endpoint, disableSSL), "" +} + +// EndpointForRegion returns an endpoint and its signing region for a service and region. +// if the service and region pair are not found endpoint and signingRegion will be empty. +func EndpointForRegion(svcName, region string, disableSSL, useDualStack bool) (endpoint, signingRegion string) { + dualStackField := "" + if useDualStack { + dualStackField = "/dualstack" + } + + derivedKeys := []string{ + region + "/" + svcName + dualStackField, + region + "/*" + dualStackField, + "*/" + svcName + dualStackField, + "*/*" + dualStackField, + } + + for _, key := range derivedKeys { + if val, ok := endpointsMap.Endpoints[key]; ok { + ep := val.Endpoint + ep = strings.Replace(ep, "{region}", region, -1) + ep = strings.Replace(ep, "{service}", svcName, -1) + + endpoint = ep + signingRegion = val.SigningRegion + break + } + } + + return AddScheme(endpoint, disableSSL), signingRegion +} + +// Regular expression to determine if the endpoint string is prefixed with a scheme. +var schemeRE = regexp.MustCompile("^([^:]+)://") + +// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no +// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. +func AddScheme(endpoint string, disableSSL bool) string { + if endpoint != "" && !schemeRE.MatchString(endpoint) { + scheme := "https" + if disableSSL { + scheme = "http" + } + endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) + } + + return endpoint +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json new file mode 100644 index 000000000..5594f2efd --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -0,0 +1,82 @@ +{ + "version": 2, + "endpoints": { + "*/*": { + "endpoint": "{service}.{region}.amazonaws.com" + }, + "cn-north-1/*": { + "endpoint": "{service}.{region}.amazonaws.com.cn", + "signatureVersion": "v4" + }, + "cn-north-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "us-gov-west-1/iam": { + "endpoint": "iam.us-gov.amazonaws.com" + }, + "us-gov-west-1/sts": { + "endpoint": "sts.us-gov-west-1.amazonaws.com" + }, + "us-gov-west-1/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "us-gov-west-1/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/budgets": { + "endpoint": "budgets.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudfront": { + "endpoint": "cloudfront.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/cloudsearchdomain": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/data.iot": { + "endpoint": "", + "signingRegion": "us-east-1" + }, + "*/ec2metadata": { + "endpoint": "http://169.254.169.254/latest" + }, + "*/iam": { + "endpoint": "iam.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/importexport": { + "endpoint": "importexport.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/route53": { + "endpoint": "route53.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/sts": { + "endpoint": 
"sts.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/waf": { + "endpoint": "waf.amazonaws.com", + "signingRegion": "us-east-1" + }, + "us-east-1/sdb": { + "endpoint": "sdb.amazonaws.com", + "signingRegion": "us-east-1" + }, + "*/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, + "*/s3/dualstack": { + "endpoint": "s3.dualstack.{region}.amazonaws.com" + }, + "us-east-1/s3": { + "endpoint": "s3.amazonaws.com" + }, + "eu-central-1/s3": { + "endpoint": "{service}.{region}.amazonaws.com" + } + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go new file mode 100644 index 000000000..e79e6782a --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -0,0 +1,95 @@ +package endpoints + +// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. + +type endpointStruct struct { + Version int + Endpoints map[string]endpointEntry +} + +type endpointEntry struct { + Endpoint string + SigningRegion string +} + +var endpointsMap = endpointStruct{ + Version: 2, + Endpoints: map[string]endpointEntry{ + "*/*": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "*/budgets": { + Endpoint: "budgets.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudfront": { + Endpoint: "cloudfront.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/cloudsearchdomain": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/data.iot": { + Endpoint: "", + SigningRegion: "us-east-1", + }, + "*/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "*/iam": { + Endpoint: "iam.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/importexport": { + Endpoint: "importexport.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/route53": { + Endpoint: "route53.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "*/s3/dualstack": { + Endpoint: "s3.dualstack.{region}.amazonaws.com", + }, + "*/sts": { + Endpoint: "sts.amazonaws.com", + SigningRegion: "us-east-1", + }, + "*/waf": { + Endpoint: "waf.amazonaws.com", + SigningRegion: "us-east-1", + }, + "cn-north-1/*": { + Endpoint: "{service}.{region}.amazonaws.com.cn", + }, + "cn-north-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "eu-central-1/s3": { + Endpoint: "{service}.{region}.amazonaws.com", + }, + "us-east-1/s3": { + Endpoint: "s3.amazonaws.com", + }, + "us-east-1/sdb": { + Endpoint: "sdb.amazonaws.com", + SigningRegion: "us-east-1", + }, + "us-gov-west-1/ec2metadata": { + Endpoint: "http://169.254.169.254/latest", + }, + "us-gov-west-1/iam": { + Endpoint: "iam.us-gov.amazonaws.com", + }, + "us-gov-west-1/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, + "us-gov-west-1/sts": { + Endpoint: "sts.us-gov-west-1.amazonaws.com", + }, + }, +} diff --git a/vendor/github.com/bgentry/speakeasy/.gitignore b/vendor/github.com/bgentry/speakeasy/.gitignore deleted file mode 100644 index 9e1311461..000000000 --- a/vendor/github.com/bgentry/speakeasy/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -example/example -example/example.exe diff --git a/vendor/github.com/cenkalti/backoff/.gitignore b/vendor/github.com/cenkalti/backoff/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/cenkalti/backoff/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] 
-[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/cenkalti/backoff/.travis.yml b/vendor/github.com/cenkalti/backoff/.travis.yml deleted file mode 100644 index ce9cb6233..000000000 --- a/vendor/github.com/cenkalti/backoff/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -go: 1.3.3 diff --git a/vendor/github.com/cihub/seelog/archive/archive_test.go b/vendor/github.com/cihub/seelog/archive/archive_test.go deleted file mode 100644 index a05cac726..000000000 --- a/vendor/github.com/cihub/seelog/archive/archive_test.go +++ /dev/null @@ -1,178 +0,0 @@ -package archive_test - -import ( - "bytes" - "fmt" - "io" - "testing" - - "github.com/cihub/seelog/archive" - "github.com/cihub/seelog/archive/gzip" - "github.com/cihub/seelog/archive/tar" - "github.com/cihub/seelog/archive/zip" - "github.com/cihub/seelog/io/iotest" -) - -const ( - gzipType = "gzip" - tarType = "tar" - zipType = "zip" -) - -var types = []string{gzipType, tarType, zipType} - -type file struct { - name string - contents []byte -} - -var ( - oneFile = []file{ - { - name: "file1", - contents: []byte("This is a single log."), - }, - } - twoFiles = []file{ - { - name: "file1", - contents: []byte("This is a log."), - }, - { - name: "file2", - contents: []byte("This is another log."), - }, - } -) - -type testCase struct { - srcType, dstType string - in []file -} - -func copyTests() map[string]testCase { - // types X types X files - tests := make(map[string]testCase, len(types)*len(types)*2) - for _, srct := range types { - for _, dstt := range types { - tests[fmt.Sprintf("%s to %s: one file", srct, dstt)] = testCase{ - srcType: srct, - dstType: dstt, - in: oneFile, - } - // gzip does not handle more than one file - if srct != gzipType && dstt != gzipType { - tests[fmt.Sprintf("%s to %s: two files", srct, dstt)] = testCase{ - srcType: srct, - dstType: dstt, - in: twoFiles, - } - } - } - } - return tests -} - -func TestCopy(t *testing.T) { - srcb, dstb := new(bytes.Buffer), new(bytes.Buffer) - for tname, tt := range copyTests() { - // Reset buffers between tests - srcb.Reset() - dstb.Reset() - - // Last file name (needed for gzip.NewReader) - var fname string - - // Seed the src - srcw := writer(t, tname, srcb, tt.srcType) - for _, f := range tt.in { - srcw.NextFile(f.name, iotest.FileInfo(t, f.contents)) - mustCopy(t, tname, srcw, bytes.NewReader(f.contents)) - fname = f.name - } - mustClose(t, tname, srcw) - - // Perform the copy - srcr := reader(t, tname, srcb, tt.srcType, fname) - dstw := writer(t, tname, dstb, tt.dstType) - if err := archive.Copy(dstw, srcr); err != nil { - t.Fatalf("%s: %v", tname, err) - } - srcr.Close() // Read-only - mustClose(t, tname, dstw) - - // Read back dst to confirm our expectations - dstr := reader(t, tname, dstb, tt.dstType, fname) - for _, want := range tt.in { - buf := new(bytes.Buffer) - name, err := dstr.NextFile() - if err != nil { - t.Fatalf("%s: %v", tname, err) - } - mustCopy(t, tname, buf, dstr) - got := file{ - name: name, - contents: buf.Bytes(), - } - - switch { - case got.name != want.name: - t.Errorf("%s: got file %q but want file %q", - tname, got.name, want.name) - - case !bytes.Equal(got.contents, want.contents): - t.Errorf("%s: mismatched contents in %q: got %q but want %q", - tname, got.name, got.contents, want.contents) - } - } - dstr.Close() - } -} - -func writer(t *testing.T, tname string, w io.Writer, atype string) archive.WriteCloser { - switch atype { - case gzipType: - return 
gzip.NewWriter(w) - case tarType: - return tar.NewWriter(w) - case zipType: - return zip.NewWriter(w) - } - t.Fatalf("%s: unrecognized archive type: %s", tname, atype) - panic("execution continued after (*testing.T).Fatalf") -} - -func reader(t *testing.T, tname string, buf *bytes.Buffer, atype string, fname string) archive.ReadCloser { - switch atype { - case gzipType: - gr, err := gzip.NewReader(buf, fname) - if err != nil { - t.Fatalf("%s: %v", tname, err) - } - return gr - case tarType: - return archive.NopCloser(tar.NewReader(buf)) - case zipType: - zr, err := zip.NewReader( - bytes.NewReader(buf.Bytes()), - int64(buf.Len())) - if err != nil { - t.Fatalf("%s: new zip reader: %v", tname, err) - } - return archive.NopCloser(zr) - } - t.Fatalf("%s: unrecognized archive type: %s", tname, atype) - panic("execution continued after (*testing.T).Fatalf") -} - -func mustCopy(t *testing.T, tname string, dst io.Writer, src io.Reader) { - if _, err := io.Copy(dst, src); err != nil { - t.Fatalf("%s: copy: %v", tname, err) - } -} - -func mustClose(t *testing.T, tname string, c io.Closer) { - if err := c.Close(); err != nil { - t.Fatalf("%s: close: %v", tname, err) - } -} diff --git a/vendor/github.com/cihub/seelog/archive/tar/tar_test.go b/vendor/github.com/cihub/seelog/archive/tar/tar_test.go deleted file mode 100644 index eeb5b44e4..000000000 --- a/vendor/github.com/cihub/seelog/archive/tar/tar_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package tar_test - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/cihub/seelog/archive/tar" - "github.com/cihub/seelog/io/iotest" -) - -type file struct { - name string - contents []byte -} - -var tarTests = map[string]struct{ want []file }{ - "one file": { - want: []file{ - { - name: "file", - contents: []byte("I am a log file"), - }, - }, - }, - "multiple files": { - want: []file{ - { - name: "file1", - contents: []byte("I am log file 1"), - }, - { - name: "file2", - contents: []byte("I am log file 2"), - }, - }, - }, -} - -func TestWriterAndReader(t *testing.T) { - for tname, tt := range tarTests { - f, cleanup := iotest.TempFile(t) - defer cleanup() - writeFiles(t, f, tname, tt.want) - readFiles(t, f, tname, tt.want) - } -} - -// writeFiles iterates through the files we want and writes them as a tarred -// file. -func writeFiles(t *testing.T, f *os.File, tname string, want []file) { - w := tar.NewWriter(f) - defer w.Close() - - // Write zipped files - for _, fwant := range want { - fi := iotest.FileInfo(t, fwant.contents) - - // Write the file - err := w.NextFile(fwant.name, fi) - switch err { - case io.EOF: - break - default: - t.Fatalf("%s: write header for next file: %v", tname, err) - case nil: // Proceed below - } - if _, err := io.Copy(w, bytes.NewReader(fwant.contents)); err != nil { - t.Fatalf("%s: copy to writer: %v", tname, err) - } - } -} - -// readFiles iterates through tarred files and ensures they are the same. 
-func readFiles(t *testing.T, f *os.File, tname string, want []file) { - r := tar.NewReader(f) - - for _, fwant := range want { - fname, err := r.NextFile() - switch err { - case io.EOF: - return - default: - t.Fatalf("%s: read header for next file: %v", tname, err) - case nil: // Proceed below - } - - if fname != fwant.name { - t.Fatalf("%s: incorrect file name: got %q but want %q", tname, fname, fwant.name) - continue - } - - gotContents, err := ioutil.ReadAll(r) - if err != nil { - t.Fatalf("%s: read file: %v", tname, err) - } - - if !bytes.Equal(gotContents, fwant.contents) { - t.Errorf("%s: %q = %q but want %q", tname, fname, gotContents, fwant.contents) - } - } -} diff --git a/vendor/github.com/cihub/seelog/archive/zip/zip_test.go b/vendor/github.com/cihub/seelog/archive/zip/zip_test.go deleted file mode 100644 index 5bec3dff1..000000000 --- a/vendor/github.com/cihub/seelog/archive/zip/zip_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package zip_test - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "testing" - - "github.com/cihub/seelog/archive/zip" - "github.com/cihub/seelog/io/iotest" -) - -var zipTests = map[string]struct{ want map[string][]byte }{ - "one file": { - want: map[string][]byte{ - "file": []byte("I am a log file"), - }, - }, - "multiple files": { - want: map[string][]byte{ - "file1": []byte("I am log file 1"), - "file2": []byte("I am log file 2"), - }, - }, -} - -func TestWriterAndReader(t *testing.T) { - for tname, tt := range zipTests { - f, cleanup := iotest.TempFile(t) - defer cleanup() - writeFiles(t, f, tname, tt.want) - readFiles(t, f, tname, tt.want) - } -} - -// writeFiles iterates through the files we want and writes them as a zipped -// file. -func writeFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) { - w := zip.NewWriter(f) - defer w.Close() - - // Write zipped files - for fname, fbytes := range want { - fi := iotest.FileInfo(t, fbytes) - - // Write the file - err := w.NextFile(fname, fi) - switch err { - case io.EOF: - break - default: - t.Fatalf("%s: write header for next file: %v", tname, err) - case nil: // Proceed below - } - if _, err := io.Copy(w, bytes.NewReader(fbytes)); err != nil { - t.Fatalf("%s: copy to writer: %v", tname, err) - } - } -} - -// readFiles iterates through zipped files and ensures they are the same. 
-func readFiles(t *testing.T, f *os.File, tname string, want map[string][]byte) { - // Get zip Reader - fi, err := f.Stat() - if err != nil { - t.Fatalf("%s: stat zipped file: %v", tname, err) - } - r, err := zip.NewReader(f, fi.Size()) - if err != nil { - t.Fatalf("%s: %v", tname, err) - } - - for { - fname, err := r.NextFile() - switch err { - case io.EOF: - return - default: - t.Fatalf("%s: read header for next file: %v", tname, err) - case nil: // Proceed below - } - - wantBytes, ok := want[fname] - if !ok { - t.Errorf("%s: read unwanted file: %v", tname, fname) - continue - } - - gotBytes, err := ioutil.ReadAll(r) - if err != nil { - t.Fatalf("%s: read file: %v", tname, err) - } - - if !bytes.Equal(gotBytes, wantBytes) { - t.Errorf("%s: %q = %q but want %q", tname, fname, gotBytes, wantBytes) - } - } -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 457e41235..000000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright (c) 2015 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when either the code is running on Google App Engine or "-tags disableunsafe" -// is added to the go build command line. -// +build appengine disableunsafe - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. 
-func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/fatih/structs/.gitignore b/vendor/github.com/fatih/structs/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/vendor/github.com/fatih/structs/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/fatih/structs/.travis.yml b/vendor/github.com/fatih/structs/.travis.yml deleted file mode 100644 index 845012b7a..000000000 --- a/vendor/github.com/fatih/structs/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go -go: - - 1.6 - - tip -sudo: false -before_install: -- go get github.com/axw/gocov/gocov -- go get github.com/mattn/goveralls -- if ! go get github.com/golang/tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi -script: -- $HOME/gopath/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/fsouza/go-dockerclient/.gitignore b/vendor/github.com/fsouza/go-dockerclient/.gitignore deleted file mode 100644 index 5f6b48eae..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -# temporary symlink for testing -testing/data/symlink diff --git a/vendor/github.com/fsouza/go-dockerclient/.travis.yml b/vendor/github.com/fsouza/go-dockerclient/.travis.yml deleted file mode 100644 index 68b137ad2..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/.travis.yml +++ /dev/null @@ -1,27 +0,0 @@ -language: go -sudo: required -go: - - 1.4.2 - - 1.5.3 - - 1.6 - - tip -os: - - linux - - osx -env: - - GOARCH=amd64 DOCKER_VERSION=1.8.3 - - GOARCH=386 DOCKER_VERSION=1.8.3 - - GOARCH=amd64 DOCKER_VERSION=1.9.1 - - GOARCH=386 DOCKER_VERSION=1.9.1 - - GOARCH=amd64 DOCKER_VERSION=1.10.3 - - GOARCH=386 DOCKER_VERSION=1.10.3 -install: - - travis_retry travis-scripts/install.bash -script: - - travis-scripts/run-tests.bash -services: - - docker -matrix: - fast_finish: true - allow_failures: - - go: tip diff --git a/vendor/github.com/fsouza/go-dockerclient/LICENSE b/vendor/github.com/fsouza/go-dockerclient/LICENSE index 11c9e2889..b1cdd4cd2 100644 --- a/vendor/github.com/fsouza/go-dockerclient/LICENSE +++ b/vendor/github.com/fsouza/go-dockerclient/LICENSE @@ -1,4 +1,4 @@ -Copyright (c) 2013-2016, go-dockerclient authors +Copyright (c) 2016, go-dockerclient authors All rights reserved. Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go index 46629881b..dd138571f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go @@ -34,7 +34,7 @@ // // See http://blog.golang.org/context for example code for a server that uses // Contexts. 
-package context +package context // import "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" import ( "errors" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl old mode 100644 new mode 100755 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go index a48d47cff..012f2d64f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go @@ -19,7 +19,7 @@ // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. 
-package unix +package unix // import "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" import "unsafe" diff --git a/vendor/github.com/go-ini/ini/.gitignore b/vendor/github.com/go-ini/ini/.gitignore deleted file mode 100644 index 7adca9439..000000000 --- a/vendor/github.com/go-ini/ini/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -testdata/conf_out.ini -ini.sublime-project -ini.sublime-workspace -testdata/conf_reflect.ini diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go deleted file mode 100644 index c68b12525..000000000 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ /dev/null @@ -1,479 +0,0 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2012 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// +build appengine - -// This file contains an implementation of proto field accesses using package reflect. -// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can -// be used on App Engine. - -package proto - -import ( - "math" - "reflect" -) - -// A structPointer is a pointer to a struct. -type structPointer struct { - v reflect.Value -} - -// toStructPointer returns a structPointer equivalent to the given reflect value. -// The reflect value must itself be a pointer to a struct. -func toStructPointer(v reflect.Value) structPointer { - return structPointer{v} -} - -// IsNil reports whether p is nil. -func structPointer_IsNil(p structPointer) bool { - return p.v.IsNil() -} - -// Interface returns the struct pointer as an interface value. -func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { - return p.v.Interface() -} - -// A field identifies a field in a struct, accessible from a structPointer. -// In this implementation, a field is identified by the sequence of field indices -// passed to reflect's FieldByIndex. 
-type field []int - -// toField returns a field equivalent to the given reflect field. -func toField(f *reflect.StructField) field { - return f.Index -} - -// invalidField is an invalid field identifier. -var invalidField = field(nil) - -// IsValid reports whether the field identifier is valid. -func (f field) IsValid() bool { return f != nil } - -// field returns the given field in the struct as a reflect value. -func structPointer_field(p structPointer, f field) reflect.Value { - // Special case: an extension map entry with a value of type T - // passes a *T to the struct-handling code with a zero field, - // expecting that it will be treated as equivalent to *struct{ X T }, - // which has the same memory layout. We have to handle that case - // specially, because reflect will panic if we call FieldByIndex on a - // non-struct. - if f == nil { - return p.v.Elem() - } - - return p.v.Elem().FieldByIndex(f) -} - -// ifield returns the given field in the struct as an interface value. -func structPointer_ifield(p structPointer, f field) interface{} { - return structPointer_field(p, f).Addr().Interface() -} - -// Bytes returns the address of a []byte field in the struct. -func structPointer_Bytes(p structPointer, f field) *[]byte { - return structPointer_ifield(p, f).(*[]byte) -} - -// BytesSlice returns the address of a [][]byte field in the struct. -func structPointer_BytesSlice(p structPointer, f field) *[][]byte { - return structPointer_ifield(p, f).(*[][]byte) -} - -// Bool returns the address of a *bool field in the struct. -func structPointer_Bool(p structPointer, f field) **bool { - return structPointer_ifield(p, f).(**bool) -} - -// BoolVal returns the address of a bool field in the struct. -func structPointer_BoolVal(p structPointer, f field) *bool { - return structPointer_ifield(p, f).(*bool) -} - -// BoolSlice returns the address of a []bool field in the struct. -func structPointer_BoolSlice(p structPointer, f field) *[]bool { - return structPointer_ifield(p, f).(*[]bool) -} - -// String returns the address of a *string field in the struct. -func structPointer_String(p structPointer, f field) **string { - return structPointer_ifield(p, f).(**string) -} - -// StringVal returns the address of a string field in the struct. -func structPointer_StringVal(p structPointer, f field) *string { - return structPointer_ifield(p, f).(*string) -} - -// StringSlice returns the address of a []string field in the struct. -func structPointer_StringSlice(p structPointer, f field) *[]string { - return structPointer_ifield(p, f).(*[]string) -} - -// ExtMap returns the address of an extension map field in the struct. -func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { - return structPointer_ifield(p, f).(*map[int32]Extension) -} - -// Map returns the reflect.Value for the address of a map field in the struct. -func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value { - return structPointer_field(p, f).Addr() -} - -// SetStructPointer writes a *struct field in the struct. -func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { - structPointer_field(p, f).Set(q.v) -} - -// GetStructPointer reads a *struct field in the struct. -func structPointer_GetStructPointer(p structPointer, f field) structPointer { - return structPointer{structPointer_field(p, f)} -} - -// StructPointerSlice the address of a []*struct field in the struct. 
-func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { - return structPointerSlice{structPointer_field(p, f)} -} - -// A structPointerSlice represents the address of a slice of pointers to structs -// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. -type structPointerSlice struct { - v reflect.Value -} - -func (p structPointerSlice) Len() int { return p.v.Len() } -func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } -func (p structPointerSlice) Append(q structPointer) { - p.v.Set(reflect.Append(p.v, q.v)) -} - -var ( - int32Type = reflect.TypeOf(int32(0)) - uint32Type = reflect.TypeOf(uint32(0)) - float32Type = reflect.TypeOf(float32(0)) - int64Type = reflect.TypeOf(int64(0)) - uint64Type = reflect.TypeOf(uint64(0)) - float64Type = reflect.TypeOf(float64(0)) -) - -// A word32 represents a field of type *int32, *uint32, *float32, or *enum. -// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. -type word32 struct { - v reflect.Value -} - -// IsNil reports whether p is nil. -func word32_IsNil(p word32) bool { - return p.v.IsNil() -} - -// Set sets p to point at a newly allocated word with bits set to x. -func word32_Set(p word32, o *Buffer, x uint32) { - t := p.v.Type().Elem() - switch t { - case int32Type: - if len(o.int32s) == 0 { - o.int32s = make([]int32, uint32PoolSize) - } - o.int32s[0] = int32(x) - p.v.Set(reflect.ValueOf(&o.int32s[0])) - o.int32s = o.int32s[1:] - return - case uint32Type: - if len(o.uint32s) == 0 { - o.uint32s = make([]uint32, uint32PoolSize) - } - o.uint32s[0] = x - p.v.Set(reflect.ValueOf(&o.uint32s[0])) - o.uint32s = o.uint32s[1:] - return - case float32Type: - if len(o.float32s) == 0 { - o.float32s = make([]float32, uint32PoolSize) - } - o.float32s[0] = math.Float32frombits(x) - p.v.Set(reflect.ValueOf(&o.float32s[0])) - o.float32s = o.float32s[1:] - return - } - - // must be enum - p.v.Set(reflect.New(t)) - p.v.Elem().SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32_Get(p word32) uint32 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. -func structPointer_Word32(p structPointer, f field) word32 { - return word32{structPointer_field(p, f)} -} - -// A word32Val represents a field of type int32, uint32, float32, or enum. -// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. -type word32Val struct { - v reflect.Value -} - -// Set sets *p to x. -func word32Val_Set(p word32Val, x uint32) { - switch p.v.Type() { - case int32Type: - p.v.SetInt(int64(x)) - return - case uint32Type: - p.v.SetUint(uint64(x)) - return - case float32Type: - p.v.SetFloat(float64(math.Float32frombits(x))) - return - } - - // must be enum - p.v.SetInt(int64(int32(x))) -} - -// Get gets the bits pointed at by p, as a uint32. -func word32Val_Get(p word32Val) uint32 { - elem := p.v - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. 
-func structPointer_Word32Val(p structPointer, f field) word32Val { - return word32Val{structPointer_field(p, f)} -} - -// A word32Slice is a slice of 32-bit values. -// That is, v.Type() is []int32, []uint32, []float32, or []enum. -type word32Slice struct { - v reflect.Value -} - -func (p word32Slice) Append(x uint32) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int32: - elem.SetInt(int64(int32(x))) - case reflect.Uint32: - elem.SetUint(uint64(x)) - case reflect.Float32: - elem.SetFloat(float64(math.Float32frombits(x))) - } -} - -func (p word32Slice) Len() int { - return p.v.Len() -} - -func (p word32Slice) Index(i int) uint32 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int32: - return uint32(elem.Int()) - case reflect.Uint32: - return uint32(elem.Uint()) - case reflect.Float32: - return math.Float32bits(float32(elem.Float())) - } - panic("unreachable") -} - -// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. -func structPointer_Word32Slice(p structPointer, f field) word32Slice { - return word32Slice{structPointer_field(p, f)} -} - -// word64 is like word32 but for 64-bit values. -type word64 struct { - v reflect.Value -} - -func word64_Set(p word64, o *Buffer, x uint64) { - t := p.v.Type().Elem() - switch t { - case int64Type: - if len(o.int64s) == 0 { - o.int64s = make([]int64, uint64PoolSize) - } - o.int64s[0] = int64(x) - p.v.Set(reflect.ValueOf(&o.int64s[0])) - o.int64s = o.int64s[1:] - return - case uint64Type: - if len(o.uint64s) == 0 { - o.uint64s = make([]uint64, uint64PoolSize) - } - o.uint64s[0] = x - p.v.Set(reflect.ValueOf(&o.uint64s[0])) - o.uint64s = o.uint64s[1:] - return - case float64Type: - if len(o.float64s) == 0 { - o.float64s = make([]float64, uint64PoolSize) - } - o.float64s[0] = math.Float64frombits(x) - p.v.Set(reflect.ValueOf(&o.float64s[0])) - o.float64s = o.float64s[1:] - return - } - panic("unreachable") -} - -func word64_IsNil(p word64) bool { - return p.v.IsNil() -} - -func word64_Get(p word64) uint64 { - elem := p.v.Elem() - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64(p structPointer, f field) word64 { - return word64{structPointer_field(p, f)} -} - -// word64Val is like word32Val but for 64-bit values. 
-type word64Val struct { - v reflect.Value -} - -func word64Val_Set(p word64Val, o *Buffer, x uint64) { - switch p.v.Type() { - case int64Type: - p.v.SetInt(int64(x)) - return - case uint64Type: - p.v.SetUint(x) - return - case float64Type: - p.v.SetFloat(math.Float64frombits(x)) - return - } - panic("unreachable") -} - -func word64Val_Get(p word64Val) uint64 { - elem := p.v - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return elem.Uint() - case reflect.Float64: - return math.Float64bits(elem.Float()) - } - panic("unreachable") -} - -func structPointer_Word64Val(p structPointer, f field) word64Val { - return word64Val{structPointer_field(p, f)} -} - -type word64Slice struct { - v reflect.Value -} - -func (p word64Slice) Append(x uint64) { - n, m := p.v.Len(), p.v.Cap() - if n < m { - p.v.SetLen(n + 1) - } else { - t := p.v.Type().Elem() - p.v.Set(reflect.Append(p.v, reflect.Zero(t))) - } - elem := p.v.Index(n) - switch elem.Kind() { - case reflect.Int64: - elem.SetInt(int64(int64(x))) - case reflect.Uint64: - elem.SetUint(uint64(x)) - case reflect.Float64: - elem.SetFloat(float64(math.Float64frombits(x))) - } -} - -func (p word64Slice) Len() int { - return p.v.Len() -} - -func (p word64Slice) Index(i int) uint64 { - elem := p.v.Index(i) - switch elem.Kind() { - case reflect.Int64: - return uint64(elem.Int()) - case reflect.Uint64: - return uint64(elem.Uint()) - case reflect.Float64: - return math.Float64bits(float64(elem.Float())) - } - panic("unreachable") -} - -func structPointer_Word64Slice(p structPointer, f field) word64Slice { - return word64Slice{structPointer_field(p, f)} -} diff --git a/vendor/github.com/gosimple/slug/.gitignore b/vendor/github.com/gosimple/slug/.gitignore deleted file mode 100644 index 25d190e2b..000000000 --- a/vendor/github.com/gosimple/slug/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -_* -cover*.out diff --git a/vendor/github.com/hashicorp/go-plugin/.gitignore b/vendor/github.com/hashicorp/go-plugin/.gitignore deleted file mode 100644 index e43b0f988..000000000 --- a/vendor/github.com/hashicorp/go-plugin/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.DS_Store diff --git a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml b/vendor/github.com/hashicorp/go-rootcerts/.travis.yml deleted file mode 100644 index 80e1de44e..000000000 --- a/vendor/github.com/hashicorp/go-rootcerts/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -sudo: false - -language: go - -go: - - 1.6 - -branches: - only: - - master - -script: make test diff --git a/vendor/github.com/hashicorp/go-version/.travis.yml b/vendor/github.com/hashicorp/go-version/.travis.yml deleted file mode 100644 index 9f30eecd7..000000000 --- a/vendor/github.com/hashicorp/go-version/.travis.yml +++ /dev/null @@ -1,11 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - 1.4 - -script: - - go test diff --git a/vendor/github.com/hashicorp/logutils/.gitignore b/vendor/github.com/hashicorp/logutils/.gitignore deleted file mode 100644 index 00268614f..000000000 --- a/vendor/github.com/hashicorp/logutils/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go new file mode 100644 
index 000000000..f9e6c05f9 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir.go @@ -0,0 +1,399 @@ +package allocdir + +import ( + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "time" + + "gopkg.in/tomb.v1" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hpcloud/tail/watch" +) + +var ( + // The name of the directory that is shared across tasks in a task group. + SharedAllocName = "alloc" + + // Name of the directory where logs of Tasks are written + LogDirName = "logs" + + // The set of directories that exist inside eache shared alloc directory. + SharedAllocDirs = []string{LogDirName, "tmp", "data"} + + // The name of the directory that exists inside each task directory + // regardless of driver. + TaskLocal = "local" + + // TaskDirs is the set of directories created in each tasks directory. + TaskDirs = []string{"tmp"} +) + +type AllocDir struct { + // AllocDir is the directory used for storing any state + // of this allocation. It will be purged on alloc destroy. + AllocDir string + + // The shared directory is available to all tasks within the same task + // group. + SharedDir string + + // TaskDirs is a mapping of task names to their non-shared directory. + TaskDirs map[string]string +} + +// AllocFileInfo holds information about a file inside the AllocDir +type AllocFileInfo struct { + Name string + IsDir bool + Size int64 + FileMode string + ModTime time.Time +} + +// AllocDirFS exposes file operations on the alloc dir +type AllocDirFS interface { + List(path string) ([]*AllocFileInfo, error) + Stat(path string) (*AllocFileInfo, error) + ReadAt(path string, offset int64) (io.ReadCloser, error) + BlockUntilExists(path string, t *tomb.Tomb) chan error + ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error) +} + +func NewAllocDir(allocDir string) *AllocDir { + d := &AllocDir{AllocDir: allocDir, TaskDirs: make(map[string]string)} + d.SharedDir = filepath.Join(d.AllocDir, SharedAllocName) + return d +} + +// Tears down previously build directory structure. +func (d *AllocDir) Destroy() error { + // Unmount all mounted shared alloc dirs. + var mErr multierror.Error + if err := d.UnmountAll(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + if err := os.RemoveAll(d.AllocDir); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + + return mErr.ErrorOrNil() +} + +func (d *AllocDir) UnmountAll() error { + var mErr multierror.Error + for _, dir := range d.TaskDirs { + // Check if the directory has the shared alloc mounted. + taskAlloc := filepath.Join(dir, SharedAllocName) + if d.pathExists(taskAlloc) { + if err := d.unmountSharedDir(taskAlloc); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to unmount shared alloc dir %q: %v", taskAlloc, err)) + } else if err := os.RemoveAll(taskAlloc); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to delete shared alloc dir %q: %v", taskAlloc, err)) + } + } + + // Unmount dev/ and proc/ have been mounted. + d.unmountSpecialDirs(dir) + } + + return mErr.ErrorOrNil() +} + +// Given a list of a task build the correct alloc structure. +func (d *AllocDir) Build(tasks []*structs.Task) error { + // Make the alloc directory, owned by the nomad process. + if err := os.MkdirAll(d.AllocDir, 0755); err != nil { + return fmt.Errorf("Failed to make the alloc directory %v: %v", d.AllocDir, err) + } + + // Make the shared directory and make it available to all user/groups. 
+ if err := os.MkdirAll(d.SharedDir, 0777); err != nil { + return err + } + + // Make the shared directory have non-root permissions. + if err := d.dropDirPermissions(d.SharedDir); err != nil { + return err + } + + for _, dir := range SharedAllocDirs { + p := filepath.Join(d.SharedDir, dir) + if err := os.MkdirAll(p, 0777); err != nil { + return err + } + if err := d.dropDirPermissions(p); err != nil { + return err + } + } + + // Make the task directories. + for _, t := range tasks { + taskDir := filepath.Join(d.AllocDir, t.Name) + if err := os.MkdirAll(taskDir, 0777); err != nil { + return err + } + + // Make the task directory have non-root permissions. + if err := d.dropDirPermissions(taskDir); err != nil { + return err + } + + // Create a local directory that each task can use. + local := filepath.Join(taskDir, TaskLocal) + if err := os.MkdirAll(local, 0777); err != nil { + return err + } + + if err := d.dropDirPermissions(local); err != nil { + return err + } + + d.TaskDirs[t.Name] = taskDir + + // Create the directories that should be in every task. + for _, dir := range TaskDirs { + local := filepath.Join(taskDir, dir) + if err := os.MkdirAll(local, 0777); err != nil { + return err + } + + if err := d.dropDirPermissions(local); err != nil { + return err + } + } + } + + return nil +} + +// Embed takes a mapping of absolute directory or file paths on the host to +// their intended, relative location within the task directory. Embed attempts +// hardlink and then defaults to copying. If the path exists on the host and +// can't be embedded an error is returned. +func (d *AllocDir) Embed(task string, entries map[string]string) error { + taskdir, ok := d.TaskDirs[task] + if !ok { + return fmt.Errorf("Task directory doesn't exist for task %v", task) + } + + subdirs := make(map[string]string) + for source, dest := range entries { + // Check to see if directory exists on host. + s, err := os.Stat(source) + if os.IsNotExist(err) { + continue + } + + // Embedding a single file + if !s.IsDir() { + destDir := filepath.Join(taskdir, filepath.Dir(dest)) + if err := os.MkdirAll(destDir, s.Mode().Perm()); err != nil { + return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err) + } + + // Copy the file. + taskEntry := filepath.Join(destDir, filepath.Base(dest)) + if err := d.linkOrCopy(source, taskEntry, s.Mode().Perm()); err != nil { + return err + } + + continue + } + + // Create destination directory. + destDir := filepath.Join(taskdir, dest) + if err := os.MkdirAll(destDir, s.Mode().Perm()); err != nil { + return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err) + } + + // Enumerate the files in source. + dirEntries, err := ioutil.ReadDir(source) + if err != nil { + return fmt.Errorf("Couldn't read directory %v: %v", source, err) + } + + for _, entry := range dirEntries { + hostEntry := filepath.Join(source, entry.Name()) + taskEntry := filepath.Join(destDir, filepath.Base(hostEntry)) + if entry.IsDir() { + subdirs[hostEntry] = filepath.Join(dest, filepath.Base(hostEntry)) + continue + } + + // Check if entry exists. This can happen if restarting a failed + // task. + if _, err := os.Lstat(taskEntry); err == nil { + continue + } + + if !entry.Mode().IsRegular() { + // If it is a symlink we can create it, otherwise we skip it. 
+ if entry.Mode()&os.ModeSymlink == 0 { + continue + } + + link, err := os.Readlink(hostEntry) + if err != nil { + return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err) + } + + if err := os.Symlink(link, taskEntry); err != nil { + // Symlinking twice + if err.(*os.LinkError).Err.Error() != "file exists" { + return fmt.Errorf("Couldn't create symlink: %v", err) + } + } + continue + } + + if err := d.linkOrCopy(hostEntry, taskEntry, entry.Mode().Perm()); err != nil { + return err + } + } + } + + // Recurse on self to copy subdirectories. + if len(subdirs) != 0 { + return d.Embed(task, subdirs) + } + + return nil +} + +// MountSharedDir mounts the shared directory into the specified task's +// directory. Mount is documented at an OS level in their respective +// implementation files. +func (d *AllocDir) MountSharedDir(task string) error { + taskDir, ok := d.TaskDirs[task] + if !ok { + return fmt.Errorf("No task directory exists for %v", task) + } + + taskLoc := filepath.Join(taskDir, SharedAllocName) + if err := d.mountSharedDir(taskLoc); err != nil { + return fmt.Errorf("Failed to mount shared directory for task %v: %v", task, err) + } + + return nil +} + +// LogDir returns the log dir in the current allocation directory +func (d *AllocDir) LogDir() string { + return filepath.Join(d.AllocDir, SharedAllocName, LogDirName) +} + +// List returns the list of files at a path relative to the alloc dir +func (d *AllocDir) List(path string) ([]*AllocFileInfo, error) { + p := filepath.Join(d.AllocDir, path) + finfos, err := ioutil.ReadDir(p) + if err != nil { + return []*AllocFileInfo{}, err + } + files := make([]*AllocFileInfo, len(finfos)) + for idx, info := range finfos { + files[idx] = &AllocFileInfo{ + Name: info.Name(), + IsDir: info.IsDir(), + Size: info.Size(), + FileMode: info.Mode().String(), + ModTime: info.ModTime(), + } + } + return files, err +} + +// Stat returns information about the file at a path relative to the alloc dir +func (d *AllocDir) Stat(path string) (*AllocFileInfo, error) { + p := filepath.Join(d.AllocDir, path) + info, err := os.Stat(p) + if err != nil { + return nil, err + } + + return &AllocFileInfo{ + Size: info.Size(), + Name: info.Name(), + IsDir: info.IsDir(), + FileMode: info.Mode().String(), + ModTime: info.ModTime(), + }, nil +} + +// ReadAt returns a reader for a file at the path relative to the alloc dir +func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { + p := filepath.Join(d.AllocDir, path) + f, err := os.Open(p) + if err != nil { + return nil, err + } + if _, err := f.Seek(offset, 0); err != nil { + return nil, fmt.Errorf("can't seek to offset %q: %v", offset, err) + } + return f, nil +} + +// BlockUntilExists blocks until the passed file relative the allocation +// directory exists. The block can be cancelled with the passed tomb. +func (d *AllocDir) BlockUntilExists(path string, t *tomb.Tomb) chan error { + // Get the path relative to the alloc directory + p := filepath.Join(d.AllocDir, path) + watcher := getFileWatcher(p) + returnCh := make(chan error, 1) + go func() { + returnCh <- watcher.BlockUntilExists(t) + close(returnCh) + }() + return returnCh +} + +// ChangeEvents watches for changes to the passed path relative to the +// allocation directory. The offset should be the last read offset. The tomb is +// used to clean up the watch. 
+func (d *AllocDir) ChangeEvents(path string, curOffset int64, t *tomb.Tomb) (*watch.FileChanges, error) { + // Get the path relative to the alloc directory + p := filepath.Join(d.AllocDir, path) + watcher := getFileWatcher(p) + return watcher.ChangeEvents(t, curOffset) +} + +// getFileWatcher returns a FileWatcher for the given path. +func getFileWatcher(path string) watch.FileWatcher { + return watch.NewPollingFileWatcher(path) +} + +func fileCopy(src, dst string, perm os.FileMode) error { + // Do a simple copy. + srcFile, err := os.Open(src) + if err != nil { + return fmt.Errorf("Couldn't open src file %v: %v", src, err) + } + + dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perm) + if err != nil { + return fmt.Errorf("Couldn't create destination file %v: %v", dst, err) + } + + if _, err := io.Copy(dstFile, srcFile); err != nil { + return fmt.Errorf("Couldn't copy %v to %v: %v", src, dst, err) + } + + return nil +} + +// pathExists is a helper function to check if the path exists. +func (d *AllocDir) pathExists(path string) bool { + if _, err := os.Stat(path); err != nil { + if os.IsNotExist(err) { + return false + } + } + return true +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_darwin.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_darwin.go new file mode 100644 index 000000000..2cfdd38c3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_darwin.go @@ -0,0 +1,26 @@ +package allocdir + +import ( + "syscall" +) + +// Hardlinks the shared directory. As a side-effect the shared directory and +// task directory must be on the same filesystem. +func (d *AllocDir) mountSharedDir(dir string) error { + return syscall.Link(d.SharedDir, dir) +} + +func (d *AllocDir) unmountSharedDir(dir string) error { + return syscall.Unlink(dir) +} + +// MountSpecialDirs mounts the dev and proc file system on the chroot of the +// task. It's a no-op on darwin. +func (d *AllocDir) MountSpecialDirs(taskDir string) error { + return nil +} + +// unmountSpecialDirs unmounts the dev and proc file system from the chroot +func (d *AllocDir) unmountSpecialDirs(taskDir string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_freebsd.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_freebsd.go new file mode 100644 index 000000000..a4d3801db --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_freebsd.go @@ -0,0 +1,26 @@ +package allocdir + +import ( + "syscall" +) + +// Hardlinks the shared directory. As a side-effect the shared directory and +// task directory must be on the same filesystem. +func (d *AllocDir) mountSharedDir(dir string) error { + return syscall.Link(d.SharedDir, dir) +} + +func (d *AllocDir) unmountSharedDir(dir string) error { + return syscall.Unlink(dir) +} + +// MountSpecialDirs mounts the dev and proc file system on the chroot of the +// task. It's a no-op on FreeBSD right now. 
+func (d *AllocDir) MountSpecialDirs(taskDir string) error { + return nil +} + +// unmountSpecialDirs unmounts the dev and proc file system from the chroot +func (d *AllocDir) unmountSpecialDirs(taskDir string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_linux.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_linux.go new file mode 100644 index 000000000..9b2c67035 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_linux.go @@ -0,0 +1,79 @@ +package allocdir + +import ( + "fmt" + "os" + "path/filepath" + "syscall" + + "github.com/hashicorp/go-multierror" +) + +// Bind mounts the shared directory into the task directory. Must be root to +// run. +func (d *AllocDir) mountSharedDir(taskDir string) error { + if err := os.MkdirAll(taskDir, 0777); err != nil { + return err + } + + return syscall.Mount(d.SharedDir, taskDir, "", syscall.MS_BIND, "") +} + +func (d *AllocDir) unmountSharedDir(dir string) error { + return syscall.Unmount(dir, 0) +} + +// MountSpecialDirs mounts the dev and proc file system from the host to the +// chroot +func (d *AllocDir) MountSpecialDirs(taskDir string) error { + // Mount dev + dev := filepath.Join(taskDir, "dev") + if !d.pathExists(dev) { + if err := os.MkdirAll(dev, 0777); err != nil { + return fmt.Errorf("Mkdir(%v) failed: %v", dev, err) + } + + if err := syscall.Mount("none", dev, "devtmpfs", syscall.MS_RDONLY, ""); err != nil { + return fmt.Errorf("Couldn't mount /dev to %v: %v", dev, err) + } + } + + // Mount proc + proc := filepath.Join(taskDir, "proc") + if !d.pathExists(proc) { + if err := os.MkdirAll(proc, 0777); err != nil { + return fmt.Errorf("Mkdir(%v) failed: %v", proc, err) + } + + if err := syscall.Mount("none", proc, "proc", syscall.MS_RDONLY, ""); err != nil { + return fmt.Errorf("Couldn't mount /proc to %v: %v", proc, err) + } + } + + return nil +} + +// unmountSpecialDirs unmounts the dev and proc file system from the chroot +func (d *AllocDir) unmountSpecialDirs(taskDir string) error { + errs := new(multierror.Error) + dev := filepath.Join(taskDir, "dev") + if d.pathExists(dev) { + if err := syscall.Unmount(dev, 0); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev (%v): %v", dev, err)) + } else if err := os.RemoveAll(dev); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory (%v): %v", dev, err)) + } + } + + // Unmount proc. + proc := filepath.Join(taskDir, "proc") + if d.pathExists(proc) { + if err := syscall.Unmount(proc, 0); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc (%v): %v", proc, err)) + } else if err := os.RemoveAll(proc); err != nil { + errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory (%v): %v", dev, err)) + } + } + + return errs.ErrorOrNil() +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_unix.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_unix.go new file mode 100644 index 000000000..339e59d5d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_unix.go @@ -0,0 +1,81 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +// Functions shared between linux/darwin. +package allocdir + +import ( + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + + "golang.org/x/sys/unix" +) + +var ( + //Path inside container for mounted directory shared across tasks in a task group. 
+ SharedAllocContainerPath = filepath.Join("/", SharedAllocName) + + //Path inside container for mounted directory for local storage. + TaskLocalContainerPath = filepath.Join("/", TaskLocal) +) + +func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error { + // Attempt to hardlink. + if err := os.Link(src, dst); err == nil { + return nil + } + + return fileCopy(src, dst, perm) +} + +func (d *AllocDir) dropDirPermissions(path string) error { + // Can't do anything if not root. + if unix.Geteuid() != 0 { + return nil + } + + u, err := user.Lookup("nobody") + if err != nil { + return err + } + + uid, err := getUid(u) + if err != nil { + return err + } + + gid, err := getGid(u) + if err != nil { + return err + } + + if err := os.Chown(path, uid, gid); err != nil { + return fmt.Errorf("Couldn't change owner/group of %v to (uid: %v, gid: %v): %v", path, uid, gid, err) + } + + if err := os.Chmod(path, 0777); err != nil { + return fmt.Errorf("Chmod(%v) failed: %v", path, err) + } + + return nil +} + +func getUid(u *user.User) (int, error) { + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return 0, fmt.Errorf("Unable to convert Uid to an int: %v", err) + } + + return uid, nil +} + +func getGid(u *user.User) (int, error) { + gid, err := strconv.Atoi(u.Gid) + if err != nil { + return 0, fmt.Errorf("Unable to convert Gid to an int: %v", err) + } + + return gid, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_windows.go b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_windows.go new file mode 100644 index 000000000..112fe9b63 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/allocdir/alloc_dir_windows.go @@ -0,0 +1,45 @@ +package allocdir + +import ( + "errors" + "os" + "path/filepath" +) + +var ( + //Path inside container for mounted directory that is shared across tasks in a task group. + SharedAllocContainerPath = filepath.Join("c:\\", SharedAllocName) + + //Path inside container for mounted directory for local storage. + TaskLocalContainerPath = filepath.Join("c:\\", TaskLocal) +) + +func (d *AllocDir) linkOrCopy(src, dst string, perm os.FileMode) error { + return fileCopy(src, dst, perm) +} + +// The windows version does nothing currently. +func (d *AllocDir) mountSharedDir(dir string) error { + return errors.New("Mount on Windows not supported.") +} + +// The windows version does nothing currently. +func (d *AllocDir) dropDirPermissions(path string) error { + return nil +} + +// The windows version does nothing currently. +func (d *AllocDir) unmountSharedDir(dir string) error { + return nil +} + +// MountSpecialDirs mounts the dev and proc file system on the chroot of the +// task. It's a no-op on windows. 
+func (d *AllocDir) MountSpecialDirs(taskDir string) error { + return nil +} + +// unmountSpecialDirs unmounts the dev and proc file system from the chroot +func (d *AllocDir) unmountSpecialDirs(taskDir string) error { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/config/config.go b/vendor/github.com/hashicorp/nomad/client/config/config.go new file mode 100644 index 000000000..1d50c5669 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/config/config.go @@ -0,0 +1,221 @@ +package config + +import ( + "fmt" + "io" + "os" + "strconv" + "strings" + "time" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/nomad/structs/config" +) + +var ( + // DefaultEnvBlacklist is the default set of environment variables that are + // filtered when passing the environment variables of the host to a task. + DefaultEnvBlacklist = strings.Join([]string{ + "CONSUL_TOKEN", + "VAULT_TOKEN", + "ATLAS_TOKEN", + "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY", "AWS_SESSION_TOKEN", + "GOOGLE_APPLICATION_CREDENTIALS", + }, ",") + + // DefaulUserBlacklist is the default set of users that tasks are not + // allowed to run as when using a driver in "user.checked_drivers" + DefaultUserBlacklist = strings.Join([]string{ + "root", + "Administrator", + }, ",") + + // DefaultUserCheckedDrivers is the set of drivers we apply the user + // blacklist onto. For virtualized drivers it often doesn't make sense to + // make this stipulation so by default they are ignored. + DefaultUserCheckedDrivers = strings.Join([]string{ + "exec", + "qemu", + "java", + }, ",") +) + +// RPCHandler can be provided to the Client if there is a local server +// to avoid going over the network. If not provided, the Client will +// maintain a connection pool to the servers +type RPCHandler interface { + RPC(method string, args interface{}, reply interface{}) error +} + +// Config is used to parameterize and configure the behavior of the client +type Config struct { + // DevMode controls if we are in a development mode which + // avoids persistent storage. + DevMode bool + + // StateDir is where we store our state + StateDir string + + // AllocDir is where we store data for allocations + AllocDir string + + // LogOutput is the destination for logs + LogOutput io.Writer + + // Region is the clients region + Region string + + // Network interface to be used in network fingerprinting + NetworkInterface string + + // Network speed is the default speed of network interfaces if they can not + // be determined dynamically. + NetworkSpeed int + + // MaxKillTimeout allows capping the user-specifiable KillTimeout. If the + // task's KillTimeout is greater than the MaxKillTimeout, MaxKillTimeout is + // used. + MaxKillTimeout time.Duration + + // Servers is a list of known server addresses. These are as "host:port" + Servers []string + + // RPCHandler can be provided to avoid network traffic if the + // server is running locally. + RPCHandler RPCHandler + + // Node provides the base node + Node *structs.Node + + // ClientMaxPort is the upper range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMaxPort uint + + // ClientMinPort is the lower range of the ports that the client uses for + // communicating with plugin subsystems over loopback + ClientMinPort uint + + // GloballyReservedPorts are ports that are reserved across all network + // devices and IPs. 
+ GloballyReservedPorts []int + + // A mapping of directories on the host OS to attempt to embed inside each + // task's chroot. + ChrootEnv map[string]string + + // Options provides arbitrary key-value configuration for nomad internals, + // like fingerprinters and drivers. The format is: + // + // namespace.option = value + Options map[string]string + + // Version is the version of the Nomad client + Version string + + // Revision is the commit number of the Nomad client + Revision string + + // ConsulConfig is this Agent's Consul configuration + ConsulConfig *config.ConsulConfig + + // StatsCollectionInterval is the interval at which the Nomad client + // collects resource usage stats + StatsCollectionInterval time.Duration + + // PublishNodeMetrics determines whether nomad is going to publish node + // level metrics to remote Telemetry sinks + PublishNodeMetrics bool + + // PublishAllocationMetrics determines whether nomad is going to publish + // allocation metrics to remote Telemetry sinks + PublishAllocationMetrics bool +} + +func (c *Config) Copy() *Config { + nc := new(Config) + *nc = *c + nc.Node = nc.Node.Copy() + nc.Servers = structs.CopySliceString(nc.Servers) + nc.Options = structs.CopyMapStringString(nc.Options) + return nc +} + +// DefaultConfig returns the default configuration +func DefaultConfig() *Config { + return &Config{ + ConsulConfig: config.DefaultConsulConfig(), + LogOutput: os.Stderr, + Region: "global", + StatsCollectionInterval: 1 * time.Second, + } +} + +// Read returns the specified configuration value or "". +func (c *Config) Read(id string) string { + return c.Options[id] +} + +// ReadDefault returns the specified configuration value, or the specified +// default value if none is set. +func (c *Config) ReadDefault(id string, defaultValue string) string { + val, ok := c.Options[id] + if !ok { + return defaultValue + } + return val +} + +// ReadBool parses the specified option as a boolean. +func (c *Config) ReadBool(id string) (bool, error) { + val, ok := c.Options[id] + if !ok { + return false, fmt.Errorf("Specified config is missing from options") + } + bval, err := strconv.ParseBool(val) + if err != nil { + return false, fmt.Errorf("Failed to parse %s as bool: %s", val, err) + } + return bval, nil +} + +// ReadBoolDefault tries to parse the specified option as a boolean. If there is +// an error in parsing, the default option is returned. +func (c *Config) ReadBoolDefault(id string, defaultValue bool) bool { + val, err := c.ReadBool(id) + if err != nil { + return defaultValue + } + return val +} + +// ReadStringListToMap tries to parse the specified option as a comma separated list. +// If there is an error in parsing, an empty list is returned. +func (c *Config) ReadStringListToMap(key string) map[string]struct{} { + s := strings.TrimSpace(c.Read(key)) + list := make(map[string]struct{}) + if s != "" { + for _, e := range strings.Split(s, ",") { + trimmed := strings.TrimSpace(e) + list[trimmed] = struct{}{} + } + } + return list +} + +// ReadStringListToMap tries to parse the specified option as a comma separated list. +// If there is an error in parsing, an empty list is returned. 
+func (c *Config) ReadStringListToMapDefault(key, defaultValue string) map[string]struct{} { + val, ok := c.Options[key] + if !ok { + val = defaultValue + } + + list := make(map[string]struct{}) + if val != "" { + for _, e := range strings.Split(val, ",") { + trimmed := strings.TrimSpace(e) + list[trimmed] = struct{}{} + } + } + return list +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/docker.go b/vendor/github.com/hashicorp/nomad/client/driver/docker.go new file mode 100644 index 000000000..cbb6e2e8e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/docker.go @@ -0,0 +1,1085 @@ +package driver + +import ( + "encoding/json" + "fmt" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + docker "github.com/fsouza/go-dockerclient" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + shelpers "github.com/hashicorp/nomad/helper/stats" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +var ( + // We store the clients globally to cache the connection to the docker daemon. + createClients sync.Once + + // client is a docker client with a timeout of 1 minute. This is for doing + // all operations with the docker daemon besides which are not long running + // such as creating, killing containers, etc. + client *docker.Client + + // waitClient is a docker client with no timeouts. This is used for long + // running operations such as waiting on containers and collect stats + waitClient *docker.Client + + // The statistics the Docker driver exposes + DockerMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage"} + DockerMeasuredCpuStats = []string{"Throttled Periods", "Throttled Time", "Percent"} +) + +const ( + // NoSuchContainerError is returned by the docker daemon if the container + // does not exist. + NoSuchContainerError = "No such container" + + // The key populated in Node Attributes to indicate presence of the Docker + // driver + dockerDriverAttr = "driver.docker" + + // dockerTimeout is the length of time a request can be outstanding before + // it is timed out. 
+ dockerTimeout = 1 * time.Minute +) + +type DockerDriver struct { + DriverContext +} + +type DockerDriverAuth struct { + Username string `mapstructure:"username"` // username for the registry + Password string `mapstructure:"password"` // password to access the registry + Email string `mapstructure:"email"` // email address of the user who is allowed to access the registry + ServerAddress string `mapstructure:"server_address"` // server address of the registry +} + +type DockerDriverConfig struct { + ImageName string `mapstructure:"image"` // Container's Image Name + LoadImages []string `mapstructure:"load"` // LoadImage is array of paths to image archive files + Command string `mapstructure:"command"` // The Command/Entrypoint to run when the container starts up + Args []string `mapstructure:"args"` // The arguments to the Command/Entrypoint + IpcMode string `mapstructure:"ipc_mode"` // The IPC mode of the container - host and none + NetworkMode string `mapstructure:"network_mode"` // The network mode of the container - host, nat and none + PidMode string `mapstructure:"pid_mode"` // The PID mode of the container - host and none + UTSMode string `mapstructure:"uts_mode"` // The UTS mode of the container - host and none + PortMapRaw []map[string]int `mapstructure:"port_map"` // + PortMap map[string]int `mapstructure:"-"` // A map of host port labels and the ports exposed on the container + Privileged bool `mapstructure:"privileged"` // Flag to run the container in privileged mode + DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers + DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers + Hostname string `mapstructure:"hostname"` // Hostname for containers + LabelsRaw []map[string]string `mapstructure:"labels"` // + Labels map[string]string `mapstructure:"-"` // Labels to set when the container starts up + Auth []DockerDriverAuth `mapstructure:"auth"` // Authentication credentials for a private Docker registry + SSL bool `mapstructure:"ssl"` // Flag indicating repository is served via https + TTY bool `mapstructure:"tty"` // Allocate a Pseudo-TTY + Interactive bool `mapstructure:"interactive"` // Keep STDIN open even if not attached + ShmSize int64 `mapstructure:"shm_size"` // Size of /dev/shm of the container in bytes + WorkDir string `mapstructure:"work_dir"` // Working directory inside the container +} + +// Validate validates a docker driver config +func (c *DockerDriverConfig) Validate() error { + if c.ImageName == "" { + return fmt.Errorf("Docker Driver needs an image name") + } + + c.PortMap = mapMergeStrInt(c.PortMapRaw...) + c.Labels = mapMergeStrStr(c.LabelsRaw...) 
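For illustration: the two merge calls above flatten the raw HCL form of port_map and labels (a list of maps) into a single map. A minimal, self-contained sketch of that flattening, with hypothetical port labels; later maps win on duplicate keys:

package main

import "fmt"

// mergeStrInt flattens several map[string]int values into one map, with
// later maps overriding earlier keys (mirrors the PortMapRaw -> PortMap step).
func mergeStrInt(maps ...map[string]int) map[string]int {
	out := map[string]int{}
	for _, in := range maps {
		for k, v := range in {
			out[k] = v
		}
	}
	return out
}

func main() {
	raw := []map[string]int{
		{"http": 8080},
		{"db": 5432},
	}
	fmt.Println(mergeStrInt(raw...)) // map[db:5432 http:8080]
}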
+ + return nil +} + +// NewDockerDriverConfig returns a docker driver config by parsing the HCL +// config +func NewDockerDriverConfig(task *structs.Task) (*DockerDriverConfig, error) { + var driverConfig DockerDriverConfig + driverConfig.SSL = true + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + if strings.Contains(driverConfig.ImageName, "https://") { + driverConfig.ImageName = strings.Replace(driverConfig.ImageName, "https://", "", 1) + } + + if err := driverConfig.Validate(); err != nil { + return nil, err + } + return &driverConfig, nil +} + +type dockerPID struct { + Version string + ImageID string + ContainerID string + KillTimeout time.Duration + MaxKillTimeout time.Duration + PluginConfig *PluginReattachConfig +} + +type DockerHandle struct { + pluginClient *plugin.Client + executor executor.Executor + client *docker.Client + waitClient *docker.Client + logger *log.Logger + cleanupImage bool + imageID string + containerID string + version string + clkSpeed float64 + killTimeout time.Duration + maxKillTimeout time.Duration + resourceUsageLock sync.RWMutex + resourceUsage *cstructs.TaskResourceUsage + waitCh chan *dstructs.WaitResult + doneCh chan bool +} + +func NewDockerDriver(ctx *DriverContext) Driver { + return &DockerDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *DockerDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "image": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "load": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "command": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "ipc_mode": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "network_mode": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "pid_mode": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "uts_mode": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "port_map": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "privileged": &fields.FieldSchema{ + Type: fields.TypeBool, + }, + "dns_servers": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "dns_search_domains": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "hostname": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "labels": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "auth": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "ssl": &fields.FieldSchema{ + Type: fields.TypeBool, + }, + "tty": &fields.FieldSchema{ + Type: fields.TypeBool, + }, + "interactive": &fields.FieldSchema{ + Type: fields.TypeBool, + }, + "shm_size": &fields.FieldSchema{ + Type: fields.TypeInt, + }, + "work_dir": &fields.FieldSchema{ + Type: fields.TypeString, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +// dockerClients creates two *docker.Client, one for long running operations and +// the other for shorter operations. In test / dev mode we can use ENV vars to +// connect to the docker daemon. In production mode we will read docker.endpoint +// from the config file. 
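As a side note on the decoding step in NewDockerDriverConfig above: mapstructure's WeakDecode coerces loosely typed job-config values into the typed struct fields. A small stand-alone sketch with made-up values, assuming the mitchellh/mapstructure API behaves as used above:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type exampleConfig struct {
	Image      string `mapstructure:"image"`
	Privileged bool   `mapstructure:"privileged"`
	ShmSize    int64  `mapstructure:"shm_size"`
}

func main() {
	// Values as they might arrive from a parsed job file: note the string
	// "true" and the float64; weak decoding coerces both into the typed fields.
	raw := map[string]interface{}{
		"image":      "redis:3.2",
		"privileged": "true",
		"shm_size":   float64(67108864),
	}

	var cfg exampleConfig
	if err := mapstructure.WeakDecode(raw, &cfg); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", cfg) // {Image:redis:3.2 Privileged:true ShmSize:67108864}
}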
+func (d *DockerDriver) dockerClients() (*docker.Client, *docker.Client, error) { + if client != nil && waitClient != nil { + return client, waitClient, nil + } + + var err error + var merr multierror.Error + createClients.Do(func() { + if err = shelpers.Init(); err != nil { + d.logger.Printf("[FATAL] driver.docker: unable to initialize stats: %v", err) + return + } + + // Default to using whatever is configured in docker.endpoint. If this is + // not specified we'll fall back on NewClientFromEnv which reads config from + // the DOCKER_* environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and + // DOCKER_CERT_PATH. This allows us to lock down the config in production + // but also accept the standard ENV configs for dev and test. + dockerEndpoint := d.config.Read("docker.endpoint") + if dockerEndpoint != "" { + cert := d.config.Read("docker.tls.cert") + key := d.config.Read("docker.tls.key") + ca := d.config.Read("docker.tls.ca") + + if cert+key+ca != "" { + d.logger.Printf("[DEBUG] driver.docker: using TLS client connection to %s", dockerEndpoint) + client, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca) + if err != nil { + merr.Errors = append(merr.Errors, err) + } + waitClient, err = docker.NewTLSClient(dockerEndpoint, cert, key, ca) + if err != nil { + merr.Errors = append(merr.Errors, err) + } + } else { + d.logger.Printf("[DEBUG] driver.docker: using standard client connection to %s", dockerEndpoint) + client, err = docker.NewClient(dockerEndpoint) + if err != nil { + merr.Errors = append(merr.Errors, err) + } + waitClient, err = docker.NewClient(dockerEndpoint) + if err != nil { + merr.Errors = append(merr.Errors, err) + } + } + client.SetTimeout(dockerTimeout) + return + } + + d.logger.Println("[DEBUG] driver.docker: using client connection initialized from environment") + client, err = docker.NewClientFromEnv() + if err != nil { + merr.Errors = append(merr.Errors, err) + } + client.SetTimeout(dockerTimeout) + + waitClient, err = docker.NewClientFromEnv() + if err != nil { + merr.Errors = append(merr.Errors, err) + } + }) + return client, waitClient, merr.ErrorOrNil() +} + +func (d *DockerDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[dockerDriverAttr] + + // Initialize docker API clients + client, _, err := d.dockerClients() + if err != nil { + delete(node.Attributes, dockerDriverAttr) + if currentlyEnabled { + d.logger.Printf("[INFO] driver.docker: failed to initialize client: %s", err) + } + return false, nil + } + + privileged := d.config.ReadBoolDefault("docker.privileged.enabled", false) + if privileged { + node.Attributes["docker.privileged.enabled"] = "1" + } + + // This is the first operation taken on the client so we'll try to + // establish a connection to the Docker daemon. If this fails it means + // Docker isn't available so we'll simply disable the docker driver. 
+ env, err := client.Version() + if err != nil { + if currentlyEnabled { + d.logger.Printf("[DEBUG] driver.docker: could not connect to docker daemon at %s: %s", client.Endpoint(), err) + } + delete(node.Attributes, dockerDriverAttr) + return false, nil + } + + node.Attributes[dockerDriverAttr] = "1" + node.Attributes["driver.docker.version"] = env.Get("Version") + return true, nil +} + +func (d *DockerDriver) containerBinds(alloc *allocdir.AllocDir, task *structs.Task) ([]string, error) { + shared := alloc.SharedDir + local, ok := alloc.TaskDirs[task.Name] + if !ok { + return nil, fmt.Errorf("Failed to find task local directory: %v", task.Name) + } + + allocDirBind := fmt.Sprintf("%s:%s", shared, allocdir.SharedAllocContainerPath) + taskLocalBind := fmt.Sprintf("%s:%s", local, allocdir.TaskLocalContainerPath) + + if selinuxLabel := d.config.Read("docker.volumes.selinuxlabel"); selinuxLabel != "" { + allocDirBind = fmt.Sprintf("%s:%s", allocDirBind, selinuxLabel) + taskLocalBind = fmt.Sprintf("%s:%s", taskLocalBind, selinuxLabel) + } + return []string{ + allocDirBind, + taskLocalBind, + }, nil +} + +// createContainer initializes a struct needed to call docker.client.CreateContainer() +func (d *DockerDriver) createContainer(ctx *ExecContext, task *structs.Task, + driverConfig *DockerDriverConfig, syslogAddr string) (docker.CreateContainerOptions, error) { + var c docker.CreateContainerOptions + if task.Resources == nil { + // Guard against missing resources. We should never have been able to + // schedule a job without specifying this. + d.logger.Println("[ERR] driver.docker: task.Resources is empty") + return c, fmt.Errorf("task.Resources is empty") + } + + binds, err := d.containerBinds(ctx.AllocDir, task) + if err != nil { + return c, err + } + + // Set environment variables. + d.taskEnv.SetAllocDir(allocdir.SharedAllocContainerPath) + d.taskEnv.SetTaskLocalDir(allocdir.TaskLocalContainerPath) + + config := &docker.Config{ + Image: driverConfig.ImageName, + Hostname: driverConfig.Hostname, + User: task.User, + Tty: driverConfig.TTY, + OpenStdin: driverConfig.Interactive, + } + + if driverConfig.WorkDir != "" { + config.WorkingDir = driverConfig.WorkDir + } + + memLimit := int64(task.Resources.MemoryMB) * 1024 * 1024 + hostConfig := &docker.HostConfig{ + // Convert MB to bytes. This is an absolute value. + Memory: memLimit, + MemorySwap: memLimit, // MemorySwap is memory + swap. + // Convert Mhz to shares. This is a relative value. + CPUShares: int64(task.Resources.CPU), + + // Binds are used to mount a host volume into the container. We mount a + // local directory for storage and a shared alloc directory that can be + // used to share data between different tasks in the same task group. 
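As the comments above note, task resources are translated from Nomad units (MB of memory, MHz of CPU) into Docker's units (bytes and CPU shares), and the alloc and task directories become host:container bind strings. A tiny illustrative sketch with hypothetical paths and values:

package main

import "fmt"

func main() {
	// Hypothetical task resources.
	memoryMB := 256
	cpuMHz := 500

	// MB -> bytes (absolute limit) and MHz -> CPU shares (relative weight),
	// as done when building the docker.HostConfig above.
	memBytes := int64(memoryMB) * 1024 * 1024
	cpuShares := int64(cpuMHz)

	// Host-path:container-path bind mounts for the shared alloc dir and the
	// task's local dir (paths here are made up for illustration).
	binds := []string{
		fmt.Sprintf("%s:%s", "/var/nomad/alloc/abc123/alloc", "/alloc"),
		fmt.Sprintf("%s:%s", "/var/nomad/alloc/abc123/web/local", "/local"),
	}

	fmt.Println(memBytes, cpuShares) // 268435456 500
	fmt.Println(binds)
}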
+ Binds: binds, + LogConfig: docker.LogConfig{ + Type: "syslog", + Config: map[string]string{ + "syslog-address": syslogAddr, + }, + }, + } + + d.logger.Printf("[DEBUG] driver.docker: using %d bytes memory for %s", hostConfig.Memory, task.Config["image"]) + d.logger.Printf("[DEBUG] driver.docker: using %d cpu shares for %s", hostConfig.CPUShares, task.Config["image"]) + d.logger.Printf("[DEBUG] driver.docker: binding directories %#v for %s", hostConfig.Binds, task.Config["image"]) + + // set privileged mode + hostPrivileged := d.config.ReadBoolDefault("docker.privileged.enabled", false) + if driverConfig.Privileged && !hostPrivileged { + return c, fmt.Errorf(`Docker privileged mode is disabled on this Nomad agent`) + } + hostConfig.Privileged = driverConfig.Privileged + + // set SHM size + if driverConfig.ShmSize != 0 { + hostConfig.ShmSize = driverConfig.ShmSize + } + + // set DNS servers + for _, ip := range driverConfig.DNSServers { + if net.ParseIP(ip) != nil { + hostConfig.DNS = append(hostConfig.DNS, ip) + } else { + d.logger.Printf("[ERR] driver.docker: invalid ip address for container dns server: %s", ip) + } + } + + // set DNS search domains + for _, domain := range driverConfig.DNSSearchDomains { + hostConfig.DNSSearch = append(hostConfig.DNSSearch, domain) + } + + hostConfig.IpcMode = driverConfig.IpcMode + hostConfig.PidMode = driverConfig.PidMode + hostConfig.UTSMode = driverConfig.UTSMode + + hostConfig.NetworkMode = driverConfig.NetworkMode + if hostConfig.NetworkMode == "" { + // docker default + d.logger.Printf("[DEBUG] driver.docker: networking mode not specified; defaulting to %s", defaultNetworkMode) + hostConfig.NetworkMode = defaultNetworkMode + } + + // Setup port mapping and exposed ports + if len(task.Resources.Networks) == 0 { + d.logger.Println("[DEBUG] driver.docker: No network interfaces are available") + if len(driverConfig.PortMap) > 0 { + return c, fmt.Errorf("Trying to map ports but no network interface is available") + } + } else { + // TODO add support for more than one network + network := task.Resources.Networks[0] + publishedPorts := map[docker.Port][]docker.PortBinding{} + exposedPorts := map[docker.Port]struct{}{} + + for _, port := range network.ReservedPorts { + // By default we will map the allocated port 1:1 to the container + containerPortInt := port.Value + + // If the user has mapped a port using port_map we'll change it here + if mapped, ok := driverConfig.PortMap[port.Label]; ok { + containerPortInt = mapped + } + + hostPortStr := strconv.Itoa(port.Value) + containerPort := docker.Port(strconv.Itoa(containerPortInt)) + + publishedPorts[containerPort+"/tcp"] = getPortBinding(network.IP, hostPortStr) + publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr) + d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (static)", network.IP, port.Value, port.Value) + + exposedPorts[containerPort+"/tcp"] = struct{}{} + exposedPorts[containerPort+"/udp"] = struct{}{} + d.logger.Printf("[DEBUG] driver.docker: exposed port %d", port.Value) + } + + for _, port := range network.DynamicPorts { + // By default we will map the allocated port 1:1 to the container + containerPortInt := port.Value + + // If the user has mapped a port using port_map we'll change it here + if mapped, ok := driverConfig.PortMap[port.Label]; ok { + containerPortInt = mapped + } + + hostPortStr := strconv.Itoa(port.Value) + containerPort := docker.Port(strconv.Itoa(containerPortInt)) + + publishedPorts[containerPort+"/tcp"] = 
getPortBinding(network.IP, hostPortStr) + publishedPorts[containerPort+"/udp"] = getPortBinding(network.IP, hostPortStr) + d.logger.Printf("[DEBUG] driver.docker: allocated port %s:%d -> %d (mapped)", network.IP, port.Value, containerPortInt) + + exposedPorts[containerPort+"/tcp"] = struct{}{} + exposedPorts[containerPort+"/udp"] = struct{}{} + d.logger.Printf("[DEBUG] driver.docker: exposed port %s", containerPort) + } + + d.taskEnv.SetPortMap(driverConfig.PortMap) + + hostConfig.PortBindings = publishedPorts + config.ExposedPorts = exposedPorts + } + + d.taskEnv.Build() + parsedArgs := d.taskEnv.ParseAndReplace(driverConfig.Args) + + // If the user specified a custom command to run as their entrypoint, we'll + // inject it here. + if driverConfig.Command != "" { + // Validate command + if err := validateCommand(driverConfig.Command, "args"); err != nil { + return c, err + } + + cmd := []string{driverConfig.Command} + if len(driverConfig.Args) != 0 { + cmd = append(cmd, parsedArgs...) + } + d.logger.Printf("[DEBUG] driver.docker: setting container startup command to: %s", strings.Join(cmd, " ")) + config.Cmd = cmd + } else if len(driverConfig.Args) != 0 { + config.Cmd = parsedArgs + } + + if len(driverConfig.Labels) > 0 { + config.Labels = driverConfig.Labels + d.logger.Printf("[DEBUG] driver.docker: applied labels on the container: %+v", config.Labels) + } + + config.Env = d.taskEnv.EnvList() + + containerName := fmt.Sprintf("%s-%s", task.Name, ctx.AllocID) + d.logger.Printf("[DEBUG] driver.docker: setting container name to: %s", containerName) + + return docker.CreateContainerOptions{ + Name: containerName, + Config: config, + HostConfig: hostConfig, + }, nil +} + +var ( + // imageNotFoundMatcher is a regex expression that matches the image not + // found error Docker returns. + imageNotFoundMatcher = regexp.MustCompile(`Error: image .+ not found`) +) + +// recoverablePullError wraps the error gotten when trying to pull and image if +// the error is recoverable. +func (d *DockerDriver) recoverablePullError(err error, image string) error { + recoverable := true + if imageNotFoundMatcher.MatchString(err.Error()) { + recoverable = false + } + return dstructs.NewRecoverableError(fmt.Errorf("Failed to pull `%s`: %s", image, err), recoverable) +} + +func (d *DockerDriver) Periodic() (bool, time.Duration) { + return true, 15 * time.Second +} + +// createImage creates a docker image either by pulling it from a registry or by +// loading it from the file system +func (d *DockerDriver) createImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir string) error { + image := driverConfig.ImageName + repo, tag := docker.ParseRepositoryTag(image) + if tag == "" { + tag = "latest" + } + + var dockerImage *docker.Image + var err error + // We're going to check whether the image is already downloaded. If the tag + // is "latest" we have to check for a new version every time so we don't + // bother to check and cache the id here. We'll download first, then cache. 
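Referring back to the port-publishing loops above: both the published and the exposed port maps are keyed by the container port with a protocol suffix. A minimal sketch using the same go-dockerclient types, with hypothetical addresses and ports:

package main

import (
	"fmt"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	// Hypothetical allocation: host port 23456 on 10.0.0.5, mapped to
	// container port 8080 via port_map.
	hostIP, hostPort := "10.0.0.5", "23456"
	containerPort := docker.Port("8080")

	published := map[docker.Port][]docker.PortBinding{
		containerPort + "/tcp": {{HostIP: hostIP, HostPort: hostPort}},
		containerPort + "/udp": {{HostIP: hostIP, HostPort: hostPort}},
	}
	exposed := map[docker.Port]struct{}{
		containerPort + "/tcp": {},
		containerPort + "/udp": {},
	}

	fmt.Println(len(published), len(exposed)) // 2 2
}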
+ if tag != "latest" { + dockerImage, err = client.InspectImage(image) + } + + // Download the image + if dockerImage == nil { + if len(driverConfig.LoadImages) > 0 { + return d.loadImage(driverConfig, client, taskDir) + } + + return d.pullImage(driverConfig, client, repo, tag) + } + return err +} + +// pullImage creates an image by pulling it from a docker registry +func (d *DockerDriver) pullImage(driverConfig *DockerDriverConfig, client *docker.Client, repo string, tag string) error { + pullOptions := docker.PullImageOptions{ + Repository: repo, + Tag: tag, + } + + authOptions := docker.AuthConfiguration{} + if len(driverConfig.Auth) != 0 { + authOptions = docker.AuthConfiguration{ + Username: driverConfig.Auth[0].Username, + Password: driverConfig.Auth[0].Password, + Email: driverConfig.Auth[0].Email, + ServerAddress: driverConfig.Auth[0].ServerAddress, + } + } + + if authConfigFile := d.config.Read("docker.auth.config"); authConfigFile != "" { + if f, err := os.Open(authConfigFile); err == nil { + defer f.Close() + var authConfigurations *docker.AuthConfigurations + if authConfigurations, err = docker.NewAuthConfigurations(f); err != nil { + return fmt.Errorf("Failed to create docker auth object: %v", err) + } + + authConfigurationKey := "" + if driverConfig.SSL { + authConfigurationKey += "https://" + } + + authConfigurationKey += strings.Split(driverConfig.ImageName, "/")[0] + if authConfiguration, ok := authConfigurations.Configs[authConfigurationKey]; ok { + authOptions = authConfiguration + } + } else { + return fmt.Errorf("Failed to open auth config file: %v, error: %v", authConfigFile, err) + } + } + + err := client.PullImage(pullOptions, authOptions) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed pulling container %s:%s: %s", repo, tag, err) + return d.recoverablePullError(err, driverConfig.ImageName) + } + d.logger.Printf("[DEBUG] driver.docker: docker pull %s:%s succeeded", repo, tag) + return nil +} + +// loadImage creates an image by loading it from the file system +func (d *DockerDriver) loadImage(driverConfig *DockerDriverConfig, client *docker.Client, taskDir string) error { + var errors multierror.Error + for _, image := range driverConfig.LoadImages { + archive := filepath.Join(taskDir, allocdir.TaskLocal, image) + d.logger.Printf("[DEBUG] driver.docker: loading image from: %v", archive) + f, err := os.Open(archive) + if err != nil { + errors.Errors = append(errors.Errors, fmt.Errorf("unable to open image archive: %v", err)) + continue + } + if err := client.LoadImage(docker.LoadImageOptions{InputStream: f}); err != nil { + errors.Errors = append(errors.Errors, err) + } + f.Close() + } + return errors.ErrorOrNil() +} + +func (d *DockerDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + driverConfig, err := NewDockerDriverConfig(task) + if err != nil { + return nil, err + } + + cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true) + + taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + // Initialize docker API clients + client, waitClient, err := d.dockerClients() + if err != nil { + return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err) + } + + if err := d.createImage(driverConfig, client, taskDir); err != nil { + return nil, fmt.Errorf("failed to create image: %v", err) + } + + image := driverConfig.ImageName + // Now that we have the image we can get the image id + 
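On the registry-auth lookup in pullImage above: credentials are selected from a parsed Docker auth file by a key built from the scheme plus the registry host of the image name. A small stand-alone sketch of that lookup; the file path and image name here are hypothetical:

package main

import (
	"fmt"
	"os"
	"strings"

	docker "github.com/fsouza/go-dockerclient"
)

func main() {
	f, err := os.Open("/etc/docker-auth.json") // hypothetical path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	auths, err := docker.NewAuthConfigurations(f)
	if err != nil {
		panic(err)
	}

	// Key is "https://" plus the registry host portion of the image name,
	// mirroring how the pull path above selects credentials.
	image := "registry.example.com/team/app:1.0" // hypothetical image
	key := "https://" + strings.Split(image, "/")[0]

	if auth, ok := auths.Configs[key]; ok {
		fmt.Println("using credentials for", auth.ServerAddress)
	} else {
		fmt.Println("no credentials found for", key)
	}
}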
dockerImage, err := client.InspectImage(image) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed getting image id for %s: %s", image, err) + return nil, fmt.Errorf("Failed to determine image id for `%s`: %s", image, err) + } + d.logger.Printf("[DEBUG] driver.docker: identified image %s as %s", image, dockerImage.ID) + + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Task: task, + Driver: "docker", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + PortLowerBound: d.config.ClientMinPort, + PortUpperBound: d.config.ClientMaxPort, + } + ss, err := exec.LaunchSyslogServer(executorCtx) + if err != nil { + return nil, fmt.Errorf("failed to start syslog collector: %v", err) + } + + config, err := d.createContainer(ctx, task, driverConfig, ss.Addr) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed to create container configuration for image %s: %s", image, err) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to create container configuration for image %s: %s", image, err) + } + // Create a container + container, err := client.CreateContainer(config) + if err != nil { + // If the container already exists because of a previous failure we'll + // try to purge it and re-create it. + if strings.Contains(err.Error(), "container already exists") { + // Get the ID of the existing container so we can delete it + containers, err := client.ListContainers(docker.ListContainersOptions{ + // The image might be in use by a stopped container, so check everything + All: true, + Filters: map[string][]string{ + "name": []string{config.Name}, + }, + }) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed to query list of containers matching name:%s", config.Name) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to query list of containers: %s", err) + } + + // Couldn't find any matching containers + if len(containers) == 0 { + d.logger.Printf("[ERR] driver.docker: failed to get id for container %s: %#v", config.Name, containers) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to get id for container %s", config.Name) + } + + // Delete matching containers + d.logger.Printf("[INFO] driver.docker: a container with the name %s already exists; will attempt to purge and re-create", config.Name) + for _, container := range containers { + err = client.RemoveContainer(docker.RemoveContainerOptions{ + ID: container.ID, + }) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed to purge container %s", container.ID) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to purge container %s: %s", container.ID, err) + } + d.logger.Printf("[INFO] driver.docker: purged container %s", container.ID) + } + + container, err = client.CreateContainer(config) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed to re-create container %s; aborting", config.Name) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to re-create container %s; aborting", config.Name) + } + } else { + // We failed to create the container for some other reason. 
+ d.logger.Printf("[ERR] driver.docker: failed to create container from image %s: %s", image, err) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to create container from image %s: %s", image, err) + } + } + d.logger.Printf("[INFO] driver.docker: created container %s", container.ID) + + // Start the container + err = client.StartContainer(container.ID, container.HostConfig) + if err != nil { + d.logger.Printf("[ERR] driver.docker: failed to start container %s: %s", container.ID, err) + pluginClient.Kill() + return nil, fmt.Errorf("Failed to start container %s: %s", container.ID, err) + } + d.logger.Printf("[INFO] driver.docker: started container %s", container.ID) + + // Return a driver handle + maxKill := d.DriverContext.config.MaxKillTimeout + h := &DockerHandle{ + client: client, + waitClient: waitClient, + executor: exec, + pluginClient: pluginClient, + cleanupImage: cleanupImage, + logger: d.logger, + imageID: dockerImage.ID, + containerID: container.ID, + version: d.config.Version, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + doneCh: make(chan bool), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := exec.SyncServices(consulContext(d.config, container.ID)); err != nil { + d.logger.Printf("[ERR] driver.docker: error registering services with consul for task: %q: %v", task.Name, err) + } + go h.collectStats() + go h.run() + return h, nil +} + +func (d *DockerDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + cleanupImage := d.config.ReadBoolDefault("docker.cleanup.image", true) + + // Split the handle + pidBytes := []byte(strings.TrimPrefix(handleID, "DOCKER:")) + pid := &dockerPID{} + if err := json.Unmarshal(pidBytes, pid); err != nil { + return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err) + } + d.logger.Printf("[INFO] driver.docker: re-attaching to docker process: %s", pid.ContainerID) + d.logger.Printf("[DEBUG] driver.docker: re-attached to handle: %s", handleID) + pluginConfig := &plugin.ClientConfig{ + Reattach: pid.PluginConfig.PluginConfig(), + } + + client, waitClient, err := d.dockerClients() + if err != nil { + return nil, fmt.Errorf("Failed to connect to docker daemon: %s", err) + } + + // Look for a running container with this ID + containers, err := client.ListContainers(docker.ListContainersOptions{ + Filters: map[string][]string{ + "id": []string{pid.ContainerID}, + }, + }) + if err != nil { + return nil, fmt.Errorf("Failed to query for container %s: %v", pid.ContainerID, err) + } + + found := false + for _, container := range containers { + if container.ID == pid.ContainerID { + found = true + } + } + if !found { + return nil, fmt.Errorf("Failed to find container %s", pid.ContainerID) + } + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + d.logger.Printf("[INFO] driver.docker: couldn't re-attach to the plugin process: %v", err) + d.logger.Printf("[DEBUG] driver.docker: stopping container %q", pid.ContainerID) + if e := client.StopContainer(pid.ContainerID, uint(pid.KillTimeout.Seconds())); e != nil { + d.logger.Printf("[DEBUG] driver.docker: couldn't stop container: %v", e) + } + return nil, err + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.docker: version of executor: %v", ver.Version) + + // Return a driver handle + h := &DockerHandle{ + client: client, + waitClient: waitClient, + executor: exec, + pluginClient: pluginClient, + cleanupImage: cleanupImage, + logger: d.logger, + imageID: 
pid.ImageID, + containerID: pid.ContainerID, + version: pid.Version, + killTimeout: pid.KillTimeout, + maxKillTimeout: pid.MaxKillTimeout, + doneCh: make(chan bool), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := exec.SyncServices(consulContext(d.config, pid.ContainerID)); err != nil { + h.logger.Printf("[ERR] driver.docker: error registering services with consul: %v", err) + } + + go h.collectStats() + go h.run() + return h, nil +} + +func (h *DockerHandle) ID() string { + // Return a handle to the PID + pid := dockerPID{ + Version: h.version, + ImageID: h.imageID, + ContainerID: h.containerID, + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + } + data, err := json.Marshal(pid) + if err != nil { + h.logger.Printf("[ERR] driver.docker: failed to marshal docker PID to JSON: %s", err) + } + return fmt.Sprintf("DOCKER:%s", string(data)) +} + +func (h *DockerHandle) ContainerID() string { + return h.containerID +} + +func (h *DockerHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *DockerHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + if err := h.executor.UpdateTask(task); err != nil { + h.logger.Printf("[DEBUG] driver.docker: failed to update log config: %v", err) + } + + // Update is not possible + return nil +} + +// Kill is used to terminate the task. This uses `docker stop -t killTimeout` +func (h *DockerHandle) Kill() error { + // Stop the container + err := h.client.StopContainer(h.containerID, uint(h.killTimeout.Seconds())) + if err != nil { + h.executor.Exit() + h.pluginClient.Kill() + + // Container has already been removed. + if strings.Contains(err.Error(), NoSuchContainerError) { + h.logger.Printf("[DEBUG] driver.docker: attempted to stop non-existent container %s", h.containerID) + return nil + } + h.logger.Printf("[ERR] driver.docker: failed to stop container %s: %v", h.containerID, err) + return fmt.Errorf("Failed to stop container %s: %s", h.containerID, err) + } + h.logger.Printf("[INFO] driver.docker: stopped container %s", h.containerID) + return nil +} + +func (h *DockerHandle) Stats() (*cstructs.TaskResourceUsage, error) { + h.resourceUsageLock.RLock() + defer h.resourceUsageLock.RUnlock() + var err error + if h.resourceUsage == nil { + err = fmt.Errorf("stats collection hasn't started yet") + } + return h.resourceUsage, err +} + +func (h *DockerHandle) run() { + // Wait for it... 
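The ID and Open methods above round-trip the container state through a prefixed JSON string so a restarted client can re-attach to a running task. A minimal stdlib-only sketch of that encode/decode round trip, with a trimmed field set and hypothetical values:

package main

import (
	"encoding/json"
	"fmt"
	"strings"
	"time"
)

// handleState is a trimmed-down stand-in for the dockerPID struct above.
type handleState struct {
	ContainerID string
	ImageID     string
	KillTimeout time.Duration
}

func main() {
	// Encode: marshal the state and prefix it, as ID() does.
	orig := handleState{ContainerID: "abc123", ImageID: "sha256:deadbeef", KillTimeout: 5 * time.Second}
	data, err := json.Marshal(orig)
	if err != nil {
		panic(err)
	}
	handleID := "DOCKER:" + string(data)

	// Decode: strip the prefix and unmarshal, as Open() does.
	var restored handleState
	if err := json.Unmarshal([]byte(strings.TrimPrefix(handleID, "DOCKER:")), &restored); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", restored) // {ContainerID:abc123 ImageID:sha256:deadbeef KillTimeout:5s}
}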
+ exitCode, err := h.waitClient.WaitContainer(h.containerID) + if err != nil { + h.logger.Printf("[ERR] driver.docker: failed to wait for %s; container already terminated", h.containerID) + } + + if exitCode != 0 { + err = fmt.Errorf("Docker container exited with non-zero exit code: %d", exitCode) + } + + close(h.doneCh) + h.waitCh <- dstructs.NewWaitResult(exitCode, 0, err) + close(h.waitCh) + + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.docker: error deregistering services: %v", err) + } + + // Shutdown the syslog collector + if err := h.executor.Exit(); err != nil { + h.logger.Printf("[ERR] driver.docker: failed to kill the syslog collector: %v", err) + } + h.pluginClient.Kill() + + // Stop the container just incase the docker daemon's wait returned + // incorrectly + if err := h.client.StopContainer(h.containerID, 0); err != nil { + _, noSuchContainer := err.(*docker.NoSuchContainer) + _, containerNotRunning := err.(*docker.ContainerNotRunning) + if !containerNotRunning && !noSuchContainer { + h.logger.Printf("[ERR] driver.docker: error stopping container: %v", err) + } + } + + // Remove the container + if err := h.client.RemoveContainer(docker.RemoveContainerOptions{ID: h.containerID, RemoveVolumes: true, Force: true}); err != nil { + h.logger.Printf("[ERR] driver.docker: error removing container: %v", err) + } + + // Cleanup the image + if h.cleanupImage { + if err := h.client.RemoveImage(h.imageID); err != nil { + h.logger.Printf("[DEBUG] driver.docker: error removing image: %v", err) + } + } +} + +// collectStats starts collecting resource usage stats of a docker container +func (h *DockerHandle) collectStats() { + statsCh := make(chan *docker.Stats) + statsOpts := docker.StatsOptions{ID: h.containerID, Done: h.doneCh, Stats: statsCh, Stream: true} + go func() { + //TODO handle Stats error + if err := h.waitClient.Stats(statsOpts); err != nil { + h.logger.Printf("[DEBUG] driver.docker: error collecting stats from container %s: %v", h.containerID, err) + } + }() + numCores := runtime.NumCPU() + for { + select { + case s := <-statsCh: + if s != nil { + ms := &cstructs.MemoryStats{ + RSS: s.MemoryStats.Stats.Rss, + Cache: s.MemoryStats.Stats.Cache, + Swap: s.MemoryStats.Stats.Swap, + MaxUsage: s.MemoryStats.MaxUsage, + Measured: DockerMeasuredMemStats, + } + + cs := &cstructs.CpuStats{ + ThrottledPeriods: s.CPUStats.ThrottlingData.ThrottledPeriods, + ThrottledTime: s.CPUStats.ThrottlingData.ThrottledTime, + Measured: DockerMeasuredCpuStats, + } + + // Calculate percentage + cores := len(s.CPUStats.CPUUsage.PercpuUsage) + cs.Percent = calculatePercent( + s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, + s.CPUStats.SystemCPUUsage, s.PreCPUStats.SystemCPUUsage, cores) + cs.SystemMode = calculatePercent( + s.CPUStats.CPUUsage.UsageInKernelmode, s.PreCPUStats.CPUUsage.UsageInKernelmode, + s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, cores) + cs.UserMode = calculatePercent( + s.CPUStats.CPUUsage.UsageInUsermode, s.PreCPUStats.CPUUsage.UsageInUsermode, + s.CPUStats.CPUUsage.TotalUsage, s.PreCPUStats.CPUUsage.TotalUsage, cores) + cs.TotalTicks = (cs.Percent / 100) * shelpers.TotalTicksAvailable() / float64(numCores) + + h.resourceUsageLock.Lock() + h.resourceUsage = &cstructs.TaskResourceUsage{ + ResourceUsage: &cstructs.ResourceUsage{ + MemoryStats: ms, + CpuStats: cs, + }, + Timestamp: s.Read.UTC().UnixNano(), + } + h.resourceUsageLock.Unlock() + } + case <-h.doneCh: + return + } + } 
+} + +func calculatePercent(newSample, oldSample, newTotal, oldTotal uint64, cores int) float64 { + numerator := newSample - oldSample + denom := newTotal - oldTotal + if numerator <= 0 || denom <= 0 { + return 0.0 + } + + return (float64(numerator) / float64(denom)) * float64(cores) * 100.0 +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/docker_default.go b/vendor/github.com/hashicorp/nomad/client/driver/docker_default.go new file mode 100644 index 000000000..ee600b141 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/docker_default.go @@ -0,0 +1,14 @@ +//+build !windows + +package driver + +import docker "github.com/fsouza/go-dockerclient" + +const ( + // Setting default network mode for non-windows OS as bridge + defaultNetworkMode = "bridge" +) + +func getPortBinding(ip string, port string) []docker.PortBinding { + return []docker.PortBinding{docker.PortBinding{HostIP: ip, HostPort: port}} +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/docker_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/docker_windows.go new file mode 100644 index 000000000..92ba2ff6b --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/docker_windows.go @@ -0,0 +1,13 @@ +package driver + +import docker "github.com/fsouza/go-dockerclient" + +const ( + // Default network mode for windows containers is nat + defaultNetworkMode = "nat" +) + +//Currently Windows containers don't support host ip in port binding. +func getPortBinding(ip string, port string) []docker.PortBinding { + return []docker.PortBinding{docker.PortBinding{HostIP: "", HostPort: port}} +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/driver.go b/vendor/github.com/hashicorp/nomad/client/driver/driver.go new file mode 100644 index 000000000..112626585 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/driver.go @@ -0,0 +1,192 @@ +package driver + +import ( + "fmt" + "log" + "path/filepath" + "sync" + + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/env" + "github.com/hashicorp/nomad/client/fingerprint" + "github.com/hashicorp/nomad/nomad/structs" + + dstructs "github.com/hashicorp/nomad/client/driver/structs" + cstructs "github.com/hashicorp/nomad/client/structs" +) + +// BuiltinDrivers contains the built in registered drivers +// which are available for allocation handling +var BuiltinDrivers = map[string]Factory{ + "docker": NewDockerDriver, + "exec": NewExecDriver, + "raw_exec": NewRawExecDriver, + "java": NewJavaDriver, + "qemu": NewQemuDriver, + "rkt": NewRktDriver, +} + +// NewDriver is used to instantiate and return a new driver +// given the name and a logger +func NewDriver(name string, ctx *DriverContext) (Driver, error) { + // Lookup the factory function + factory, ok := BuiltinDrivers[name] + if !ok { + return nil, fmt.Errorf("unknown driver '%s'", name) + } + + // Instantiate the driver + f := factory(ctx) + return f, nil +} + +// Factory is used to instantiate a new Driver +type Factory func(*DriverContext) Driver + +// Driver is used for execution of tasks. This allows Nomad +// to support many pluggable implementations of task drivers. +// Examples could include LXC, Docker, Qemu, etc. 
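BuiltinDrivers and NewDriver above form a simple factory registry keyed by driver name. A stripped-down, self-contained sketch of the same lookup-and-construct pattern; the interface and driver types here are invented for illustration:

package main

import "fmt"

// runner is a stand-in for the Driver interface above.
type runner interface {
	Name() string
}

type dockerRunner struct{}
type execRunner struct{}

func (dockerRunner) Name() string { return "docker" }
func (execRunner) Name() string   { return "exec" }

// factory mirrors the Factory type: one constructor per driver name.
type factory func() runner

var builtin = map[string]factory{
	"docker": func() runner { return dockerRunner{} },
	"exec":   func() runner { return execRunner{} },
}

// newRunner looks up the factory and instantiates it, as NewDriver does.
func newRunner(name string) (runner, error) {
	f, ok := builtin[name]
	if !ok {
		return nil, fmt.Errorf("unknown driver '%s'", name)
	}
	return f(), nil
}

func main() {
	r, err := newRunner("docker")
	if err != nil {
		panic(err)
	}
	fmt.Println(r.Name()) // docker
}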
+type Driver interface { + // Drivers must support the fingerprint interface for detection + fingerprint.Fingerprint + + // Start is used to being task execution + Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) + + // Open is used to re-open a handle to a task + Open(ctx *ExecContext, handleID string) (DriverHandle, error) + + // Drivers must validate their configuration + Validate(map[string]interface{}) error +} + +// DriverContext is a means to inject dependencies such as loggers, configs, and +// node attributes into a Driver without having to change the Driver interface +// each time we do it. Used in conjection with Factory, above. +type DriverContext struct { + taskName string + config *config.Config + logger *log.Logger + node *structs.Node + taskEnv *env.TaskEnvironment +} + +// NewEmptyDriverContext returns a DriverContext with all fields set to their +// zero value. +func NewEmptyDriverContext() *DriverContext { + return &DriverContext{ + taskName: "", + config: nil, + node: nil, + logger: nil, + taskEnv: nil, + } +} + +// NewDriverContext initializes a new DriverContext with the specified fields. +// This enables other packages to create DriverContexts but keeps the fields +// private to the driver. If we want to change this later we can gorename all of +// the fields in DriverContext. +func NewDriverContext(taskName string, config *config.Config, node *structs.Node, + logger *log.Logger, taskEnv *env.TaskEnvironment) *DriverContext { + return &DriverContext{ + taskName: taskName, + config: config, + node: node, + logger: logger, + taskEnv: taskEnv, + } +} + +// DriverHandle is an opaque handle into a driver used for task +// manipulation +type DriverHandle interface { + // Returns an opaque handle that can be used to re-open the handle + ID() string + + // WaitCh is used to return a channel used wait for task completion + WaitCh() chan *dstructs.WaitResult + + // Update is used to update the task if possible and update task related + // configurations. + Update(task *structs.Task) error + + // Kill is used to stop the task + Kill() error + + // Stats returns aggregated stats of the driver + Stats() (*cstructs.TaskResourceUsage, error) +} + +// ExecContext is shared between drivers within an allocation +type ExecContext struct { + sync.Mutex + + // AllocDir contains information about the alloc directory structure. + AllocDir *allocdir.AllocDir + + // Alloc ID + AllocID string +} + +// NewExecContext is used to create a new execution context +func NewExecContext(alloc *allocdir.AllocDir, allocID string) *ExecContext { + return &ExecContext{AllocDir: alloc, AllocID: allocID} +} + +// GetTaskEnv converts the alloc dir, the node, task and alloc into a +// TaskEnvironment. +func GetTaskEnv(allocDir *allocdir.AllocDir, node *structs.Node, + task *structs.Task, alloc *structs.Allocation) (*env.TaskEnvironment, error) { + + tg := alloc.Job.LookupTaskGroup(alloc.TaskGroup) + env := env.NewTaskEnvironment(node). + SetTaskMeta(task.Meta). + SetTaskGroupMeta(tg.Meta). + SetJobMeta(alloc.Job.Meta). + SetEnvvars(task.Env). + SetTaskName(task.Name) + + if allocDir != nil { + env.SetAllocDir(allocDir.SharedDir) + taskdir, ok := allocDir.TaskDirs[task.Name] + if !ok { + return nil, fmt.Errorf("failed to get task directory for task %q", task.Name) + } + + env.SetTaskLocalDir(filepath.Join(taskdir, allocdir.TaskLocal)) + } + + if task.Resources != nil { + env.SetMemLimit(task.Resources.MemoryMB). + SetCpuLimit(task.Resources.CPU). 
+ SetNetworks(task.Resources.Networks) + } + + if alloc != nil { + env.SetAlloc(alloc) + } + + return env.Build(), nil +} + +func mapMergeStrInt(maps ...map[string]int) map[string]int { + out := map[string]int{} + for _, in := range maps { + for key, val := range in { + out[key] = val + } + } + return out +} + +func mapMergeStrStr(maps ...map[string]string) map[string]string { + out := map[string]string{} + for _, in := range maps { + for key, val := range in { + out[key] = val + } + } + return out +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/env/env.go b/vendor/github.com/hashicorp/nomad/client/driver/env/env.go new file mode 100644 index 000000000..1ac9b7510 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/env/env.go @@ -0,0 +1,430 @@ +package env + +import ( + "fmt" + "os" + "strconv" + "strings" + + hargs "github.com/hashicorp/nomad/helper/args" + "github.com/hashicorp/nomad/nomad/structs" +) + +// A set of environment variables that are exported by each driver. +const ( + // AllocDir is the environment variable with the path to the alloc directory + // that is shared across tasks within a task group. + AllocDir = "NOMAD_ALLOC_DIR" + + // TaskLocalDir is the environment variable with the path to the tasks local + // directory where it can store data that is persisted to the alloc is + // removed. + TaskLocalDir = "NOMAD_TASK_DIR" + + // MemLimit is the environment variable with the tasks memory limit in MBs. + MemLimit = "NOMAD_MEMORY_LIMIT" + + // CpuLimit is the environment variable with the tasks CPU limit in MHz. + CpuLimit = "NOMAD_CPU_LIMIT" + + // AllocID is the environment variable for passing the allocation ID. + AllocID = "NOMAD_ALLOC_ID" + + // AllocName is the environment variable for passing the allocation name. + AllocName = "NOMAD_ALLOC_NAME" + + // TaskName is the environment variable for passing the task name. + TaskName = "NOMAD_TASK_NAME" + + // AllocIndex is the environment variable for passing the allocation index. + AllocIndex = "NOMAD_ALLOC_INDEX" + + // AddrPrefix is the prefix for passing both dynamic and static port + // allocations to tasks. + // E.g$NOMAD_ADDR_http=127.0.0.1:80 + AddrPrefix = "NOMAD_ADDR_" + + // IpPrefix is the prefix for passing the IP of a port allocation to a task. + IpPrefix = "NOMAD_IP_" + + // PortPrefix is the prefix for passing the port allocation to a task. + PortPrefix = "NOMAD_PORT_" + + // HostPortPrefix is the prefix for passing the host port when a portmap is + // specified. + HostPortPrefix = "NOMAD_HOST_PORT_" + + // MetaPrefix is the prefix for passing task meta data. + MetaPrefix = "NOMAD_META_" +) + +// The node values that can be interpreted. +const ( + nodeIdKey = "node.unique.id" + nodeDcKey = "node.datacenter" + nodeNameKey = "node.unique.name" + nodeClassKey = "node.class" + + // Prefixes used for lookups. + nodeAttributePrefix = "attr." + nodeMetaPrefix = "meta." +) + +// TaskEnvironment is used to expose information to a task via environment +// variables and provide interpolation of Nomad variables. 
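The env package's setters above are chainable (each returns the receiver), which is what lets GetTaskEnv compose the environment fluently before calling Build. A minimal sketch of that builder style with hypothetical task values:

package main

import "fmt"

// envBuilder is a stand-in showing the chainable-setter style used by
// env.TaskEnvironment: every setter returns the receiver.
type envBuilder struct {
	taskName string
	memMB    int
	cpuMHz   int
}

func (b *envBuilder) SetTaskName(n string) *envBuilder { b.taskName = n; return b }
func (b *envBuilder) SetMemLimit(mb int) *envBuilder   { b.memMB = mb; return b }
func (b *envBuilder) SetCpuLimit(mhz int) *envBuilder  { b.cpuMHz = mhz; return b }

// Build finalizes the values, loosely mirroring TaskEnvironment.Build.
func (b *envBuilder) Build() map[string]string {
	return map[string]string{
		"NOMAD_TASK_NAME":    b.taskName,
		"NOMAD_MEMORY_LIMIT": fmt.Sprint(b.memMB),
		"NOMAD_CPU_LIMIT":    fmt.Sprint(b.cpuMHz),
	}
}

func main() {
	// Hypothetical task values, set through one fluent chain.
	env := (&envBuilder{}).SetTaskName("web").SetMemLimit(256).SetCpuLimit(500).Build()
	fmt.Println(env["NOMAD_TASK_NAME"], env["NOMAD_MEMORY_LIMIT"], env["NOMAD_CPU_LIMIT"]) // web 256 500
}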
+type TaskEnvironment struct { + Env map[string]string + TaskMeta map[string]string + TaskGroupMeta map[string]string + JobMeta map[string]string + AllocDir string + TaskDir string + CpuLimit int + MemLimit int + TaskName string + AllocIndex int + AllocId string + AllocName string + Node *structs.Node + Networks []*structs.NetworkResource + PortMap map[string]int + + // taskEnv is the variables that will be set in the tasks environment + TaskEnv map[string]string + + // nodeValues is the values that are allowed for interprolation from the + // node. + NodeValues map[string]string +} + +func NewTaskEnvironment(node *structs.Node) *TaskEnvironment { + return &TaskEnvironment{Node: node, AllocIndex: -1} +} + +// ParseAndReplace takes the user supplied args replaces any instance of an +// environment variable or nomad variable in the args with the actual value. +func (t *TaskEnvironment) ParseAndReplace(args []string) []string { + replaced := make([]string, len(args)) + for i, arg := range args { + replaced[i] = hargs.ReplaceEnv(arg, t.TaskEnv, t.NodeValues) + } + + return replaced +} + +// ReplaceEnv takes an arg and replaces all occurrences of environment variables +// and nomad variables. If the variable is found in the passed map it is +// replaced, otherwise the original string is returned. +func (t *TaskEnvironment) ReplaceEnv(arg string) string { + return hargs.ReplaceEnv(arg, t.TaskEnv, t.NodeValues) +} + +// Build must be called after all the tasks environment values have been set. +func (t *TaskEnvironment) Build() *TaskEnvironment { + t.NodeValues = make(map[string]string) + t.TaskEnv = make(map[string]string) + + // Build the meta with the following precedence: task, task group, job. + for _, meta := range []map[string]string{t.JobMeta, t.TaskGroupMeta, t.TaskMeta} { + for k, v := range meta { + t.TaskEnv[fmt.Sprintf("%s%s", MetaPrefix, strings.ToUpper(k))] = v + } + } + + // Build the ports + for _, network := range t.Networks { + for label, value := range network.MapLabelToValues(nil) { + t.TaskEnv[fmt.Sprintf("%s%s", IpPrefix, label)] = network.IP + t.TaskEnv[fmt.Sprintf("%s%s", HostPortPrefix, label)] = strconv.Itoa(value) + if forwardedPort, ok := t.PortMap[label]; ok { + value = forwardedPort + } + t.TaskEnv[fmt.Sprintf("%s%s", PortPrefix, label)] = fmt.Sprintf("%d", value) + IPPort := fmt.Sprintf("%s:%d", network.IP, value) + t.TaskEnv[fmt.Sprintf("%s%s", AddrPrefix, label)] = IPPort + + } + } + + // Build the directories + if t.AllocDir != "" { + t.TaskEnv[AllocDir] = t.AllocDir + } + if t.TaskDir != "" { + t.TaskEnv[TaskLocalDir] = t.TaskDir + } + + // Build the resource limits + if t.MemLimit != 0 { + t.TaskEnv[MemLimit] = strconv.Itoa(t.MemLimit) + } + if t.CpuLimit != 0 { + t.TaskEnv[CpuLimit] = strconv.Itoa(t.CpuLimit) + } + + // Build the tasks ids + if t.AllocId != "" { + t.TaskEnv[AllocID] = t.AllocId + } + if t.AllocName != "" { + t.TaskEnv[AllocName] = t.AllocName + } + if t.AllocIndex != -1 { + t.TaskEnv[AllocIndex] = strconv.Itoa(t.AllocIndex) + } + if t.TaskName != "" { + t.TaskEnv[TaskName] = t.TaskName + } + + // Build the node + if t.Node != nil { + // Set up the node values. + t.NodeValues[nodeIdKey] = t.Node.ID + t.NodeValues[nodeDcKey] = t.Node.Datacenter + t.NodeValues[nodeNameKey] = t.Node.Name + t.NodeValues[nodeClassKey] = t.Node.NodeClass + + // Set up the attributes. + for k, v := range t.Node.Attributes { + t.NodeValues[fmt.Sprintf("%s%s", nodeAttributePrefix, k)] = v + } + + // Set up the meta. 
+ for k, v := range t.Node.Meta { + t.NodeValues[fmt.Sprintf("%s%s", nodeMetaPrefix, k)] = v + } + } + + // Interpret the environment variables + interpreted := make(map[string]string, len(t.Env)) + for k, v := range t.Env { + interpreted[k] = hargs.ReplaceEnv(v, t.NodeValues, t.TaskEnv) + } + + for k, v := range interpreted { + t.TaskEnv[k] = v + } + + return t +} + +// EnvList returns a list of strings with NAME=value pairs. +func (t *TaskEnvironment) EnvList() []string { + env := []string{} + for k, v := range t.TaskEnv { + env = append(env, fmt.Sprintf("%s=%s", k, v)) + } + + return env +} + +// EnvMap returns a copy of the tasks environment variables. +func (t *TaskEnvironment) EnvMap() map[string]string { + m := make(map[string]string, len(t.TaskEnv)) + for k, v := range t.TaskEnv { + m[k] = v + } + + return m +} + +// Builder methods to build the TaskEnvironment +func (t *TaskEnvironment) SetAllocDir(dir string) *TaskEnvironment { + t.AllocDir = dir + return t +} + +func (t *TaskEnvironment) ClearAllocDir() *TaskEnvironment { + t.AllocDir = "" + return t +} + +func (t *TaskEnvironment) SetTaskLocalDir(dir string) *TaskEnvironment { + t.TaskDir = dir + return t +} + +func (t *TaskEnvironment) ClearTaskLocalDir() *TaskEnvironment { + t.TaskDir = "" + return t +} + +func (t *TaskEnvironment) SetMemLimit(limit int) *TaskEnvironment { + t.MemLimit = limit + return t +} + +func (t *TaskEnvironment) ClearMemLimit() *TaskEnvironment { + t.MemLimit = 0 + return t +} + +func (t *TaskEnvironment) SetCpuLimit(limit int) *TaskEnvironment { + t.CpuLimit = limit + return t +} + +func (t *TaskEnvironment) ClearCpuLimit() *TaskEnvironment { + t.CpuLimit = 0 + return t +} + +func (t *TaskEnvironment) SetNetworks(networks []*structs.NetworkResource) *TaskEnvironment { + t.Networks = networks + return t +} + +func (t *TaskEnvironment) clearNetworks() *TaskEnvironment { + t.Networks = nil + return t +} + +func (t *TaskEnvironment) SetPortMap(portMap map[string]int) *TaskEnvironment { + t.PortMap = portMap + return t +} + +func (t *TaskEnvironment) clearPortMap() *TaskEnvironment { + t.PortMap = nil + return t +} + +// Takes a map of meta values to be passed to the task. The keys are capatilized +// when the environent variable is set. +func (t *TaskEnvironment) SetTaskMeta(m map[string]string) *TaskEnvironment { + t.TaskMeta = m + return t +} + +func (t *TaskEnvironment) ClearTaskMeta() *TaskEnvironment { + t.TaskMeta = nil + return t +} + +func (t *TaskEnvironment) SetTaskGroupMeta(m map[string]string) *TaskEnvironment { + t.TaskGroupMeta = m + return t +} + +func (t *TaskEnvironment) ClearTaskGroupMeta() *TaskEnvironment { + t.TaskGroupMeta = nil + return t +} + +func (t *TaskEnvironment) SetJobMeta(m map[string]string) *TaskEnvironment { + t.JobMeta = m + return t +} + +func (t *TaskEnvironment) ClearJobMeta() *TaskEnvironment { + t.JobMeta = nil + return t +} + +func (t *TaskEnvironment) SetEnvvars(m map[string]string) *TaskEnvironment { + t.Env = m + return t +} + +// Appends the given environment variables. +func (t *TaskEnvironment) AppendEnvvars(m map[string]string) *TaskEnvironment { + if t.Env == nil { + t.Env = make(map[string]string, len(m)) + } + + for k, v := range m { + t.Env[k] = v + } + return t +} + +// AppendHostEnvvars adds the host environment variables to the tasks. The +// filter parameter can be use to filter host environment from entering the +// tasks. 
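The port section of Build above derives several related variables from a single network label. A small worked sketch with hypothetical values (label "http", host IP 10.0.0.5, host port 23456, and a port_map entry mapping http to 80), mirroring the naming scheme of the constants defined earlier:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// Hypothetical allocation data.
	label, ip, hostPort := "http", "10.0.0.5", 23456
	portMap := map[string]int{"http": 80} // from the task's port_map stanza

	env := map[string]string{}
	env["NOMAD_IP_"+label] = ip
	env["NOMAD_HOST_PORT_"+label] = strconv.Itoa(hostPort)

	// If the label is mapped, the task-facing port is the container port.
	value := hostPort
	if mapped, ok := portMap[label]; ok {
		value = mapped
	}
	env["NOMAD_PORT_"+label] = strconv.Itoa(value)
	env["NOMAD_ADDR_"+label] = fmt.Sprintf("%s:%d", ip, value)

	for _, k := range []string{"NOMAD_IP_http", "NOMAD_HOST_PORT_http", "NOMAD_PORT_http", "NOMAD_ADDR_http"} {
		fmt.Println(k + "=" + env[k])
	}
	// NOMAD_IP_http=10.0.0.5
	// NOMAD_HOST_PORT_http=23456
	// NOMAD_PORT_http=80
	// NOMAD_ADDR_http=10.0.0.5:80
}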
+func (t *TaskEnvironment) AppendHostEnvvars(filter []string) *TaskEnvironment { + hostEnv := os.Environ() + if t.Env == nil { + t.Env = make(map[string]string, len(hostEnv)) + } + + // Index the filtered environment variables. + index := make(map[string]struct{}, len(filter)) + for _, f := range filter { + index[f] = struct{}{} + } + + for _, e := range hostEnv { + parts := strings.SplitN(e, "=", 2) + key, value := parts[0], parts[1] + + // Skip filtered environment variables + if _, filtered := index[key]; filtered { + continue + } + + // Don't override the tasks environment variables. + if _, existing := t.Env[key]; !existing { + t.Env[key] = value + } + } + + return t +} + +func (t *TaskEnvironment) ClearEnvvars() *TaskEnvironment { + t.Env = nil + return t +} + +// Helper method for setting all fields from an allocation. +func (t *TaskEnvironment) SetAlloc(alloc *structs.Allocation) *TaskEnvironment { + t.AllocId = alloc.ID + t.AllocName = alloc.Name + t.AllocIndex = alloc.Index() + return t +} + +// Helper method for clearing all fields from an allocation. +func (t *TaskEnvironment) ClearAlloc(alloc *structs.Allocation) *TaskEnvironment { + return t.ClearAllocId().ClearAllocName().ClearAllocIndex() +} + +func (t *TaskEnvironment) SetAllocIndex(index int) *TaskEnvironment { + t.AllocIndex = index + return t +} + +func (t *TaskEnvironment) ClearAllocIndex() *TaskEnvironment { + t.AllocIndex = -1 + return t +} + +func (t *TaskEnvironment) SetAllocId(id string) *TaskEnvironment { + t.AllocId = id + return t +} + +func (t *TaskEnvironment) ClearAllocId() *TaskEnvironment { + t.AllocId = "" + return t +} + +func (t *TaskEnvironment) SetAllocName(name string) *TaskEnvironment { + t.AllocName = name + return t +} + +func (t *TaskEnvironment) ClearAllocName() *TaskEnvironment { + t.AllocName = "" + return t +} + +func (t *TaskEnvironment) SetTaskName(name string) *TaskEnvironment { + t.TaskName = name + return t +} + +func (t *TaskEnvironment) ClearTaskName() *TaskEnvironment { + t.TaskName = "" + return t +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/exec.go b/vendor/github.com/hashicorp/nomad/client/driver/exec.go new file mode 100644 index 000000000..728ed3f5f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/exec.go @@ -0,0 +1,319 @@ +package driver + +import ( + "encoding/json" + "fmt" + "log" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +const ( + // The key populated in Node Attributes to indicate the presence of the Exec + // driver + execDriverAttr = "driver.exec" +) + +// ExecDriver fork/execs tasks using as many of the underlying OS's isolation +// features. 
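AppendHostEnvvars above copies the host environment into the task while skipping filtered keys and never overriding values the task already defines. A minimal stdlib sketch of that filtering step; the filter list here is a made-up subset:

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Hypothetical filter list, e.g. from the env.blacklist client option.
	filter := map[string]struct{}{
		"AWS_SECRET_ACCESS_KEY": {},
		"VAULT_TOKEN":           {},
	}

	taskEnv := map[string]string{"PATH": "/task/bin"} // already set by the task

	for _, e := range os.Environ() {
		parts := strings.SplitN(e, "=", 2)
		key, value := parts[0], parts[1]

		if _, blocked := filter[key]; blocked {
			continue // drop filtered host variables
		}
		if _, exists := taskEnv[key]; exists {
			continue // don't override what the task already defines
		}
		taskEnv[key] = value
	}

	fmt.Println(len(taskEnv), "environment variables after filtering")
}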
+type ExecDriver struct { + DriverContext +} + +type ExecDriverConfig struct { + Command string `mapstructure:"command"` + Args []string `mapstructure:"args"` +} + +// execHandle is returned from Start/Open as a handle to the PID +type execHandle struct { + pluginClient *plugin.Client + executor executor.Executor + isolationConfig *dstructs.IsolationConfig + userPid int + allocDir *allocdir.AllocDir + killTimeout time.Duration + maxKillTimeout time.Duration + logger *log.Logger + waitCh chan *dstructs.WaitResult + doneCh chan struct{} + version string +} + +// NewExecDriver is used to create a new exec driver +func NewExecDriver(ctx *DriverContext) Driver { + return &ExecDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *ExecDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "command": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +func (d *ExecDriver) Periodic() (bool, time.Duration) { + return true, 15 * time.Second +} + +func (d *ExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + var driverConfig ExecDriverConfig + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + + // Get the command to be ran + command := driverConfig.Command + if err := validateCommand(command, "args"); err != nil { + return nil, err + } + + // Set the host environment variables. + filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",") + d.taskEnv.AppendHostEnvvars(filter) + + // Get the task directory for storing the executor logs. 
+ taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Driver: "exec", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + ChrootEnv: d.config.ChrootEnv, + Task: task, + } + + ps, err := exec.LaunchCmd(&executor.ExecCommand{ + Cmd: command, + Args: driverConfig.Args, + FSIsolation: true, + ResourceLimits: true, + User: getExecutorUser(task), + }, executorCtx) + if err != nil { + pluginClient.Kill() + return nil, err + } + d.logger.Printf("[DEBUG] driver.exec: started process via plugin with pid: %v", ps.Pid) + + // Return a driver handle + maxKill := d.DriverContext.config.MaxKillTimeout + h := &execHandle{ + pluginClient: pluginClient, + userPid: ps.Pid, + executor: exec, + allocDir: ctx.AllocDir, + isolationConfig: ps.IsolationConfig, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + logger: d.logger, + version: d.config.Version, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := exec.SyncServices(consulContext(d.config, "")); err != nil { + d.logger.Printf("[ERR] driver.exec: error registering services with consul for task: %q: %v", task.Name, err) + } + go h.run() + return h, nil +} + +type execId struct { + Version string + KillTimeout time.Duration + MaxKillTimeout time.Duration + UserPid int + TaskDir string + AllocDir *allocdir.AllocDir + IsolationConfig *dstructs.IsolationConfig + PluginConfig *PluginReattachConfig +} + +func (d *ExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + id := &execId{} + if err := json.Unmarshal([]byte(handleID), id); err != nil { + return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err) + } + + pluginConfig := &plugin.ClientConfig{ + Reattach: id.PluginConfig.PluginConfig(), + } + exec, client, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + merrs := new(multierror.Error) + merrs.Errors = append(merrs.Errors, err) + d.logger.Println("[ERR] driver.exec: error connecting to plugin so destroying plugin pid and user pid") + if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil { + merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e)) + } + if id.IsolationConfig != nil { + ePid := pluginConfig.Reattach.Pid + if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil { + merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying cgroup failed: %v", e)) + } + } + if e := ctx.AllocDir.UnmountAll(); e != nil { + merrs.Errors = append(merrs.Errors, e) + } + return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil()) + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.exec : version of executor: %v", ver.Version) + // Return a driver handle + h := &execHandle{ + pluginClient: client, + executor: exec, + userPid: id.UserPid, + allocDir: id.AllocDir, + isolationConfig: id.IsolationConfig, + 
logger: d.logger, + version: id.Version, + killTimeout: id.KillTimeout, + maxKillTimeout: id.MaxKillTimeout, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := exec.SyncServices(consulContext(d.config, "")); err != nil { + d.logger.Printf("[ERR] driver.exec: error registering services with consul: %v", err) + } + go h.run() + return h, nil +} + +func (h *execHandle) ID() string { + id := execId{ + Version: h.version, + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + UserPid: h.userPid, + AllocDir: h.allocDir, + IsolationConfig: h.isolationConfig, + } + + data, err := json.Marshal(id) + if err != nil { + h.logger.Printf("[ERR] driver.exec: failed to marshal ID to JSON: %s", err) + } + return string(data) +} + +func (h *execHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *execHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + h.executor.UpdateTask(task) + + // Update is not possible + return nil +} + +func (h *execHandle) Kill() error { + if err := h.executor.ShutDown(); err != nil { + if h.pluginClient.Exited() { + return nil + } + return fmt.Errorf("executor Shutdown failed: %v", err) + } + + select { + case <-h.doneCh: + return nil + case <-time.After(h.killTimeout): + if h.pluginClient.Exited() { + return nil + } + if err := h.executor.Exit(); err != nil { + return fmt.Errorf("executor Exit failed: %v", err) + } + + return nil + } +} + +func (h *execHandle) Stats() (*cstructs.TaskResourceUsage, error) { + return h.executor.Stats() +} + +func (h *execHandle) run() { + ps, err := h.executor.Wait() + close(h.doneCh) + + // If the exitcode is 0 and we had an error that means the plugin didn't + // connect and doesn't know the state of the user process so we are killing + // the user process so that when we create a new executor on restarting the + // new user process doesn't have collisions with resources that the older + // user pid might be holding onto. 
+ if ps.ExitCode == 0 && err != nil { + if h.isolationConfig != nil { + ePid := h.pluginClient.ReattachConfig().Pid + if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil { + h.logger.Printf("[ERR] driver.exec: destroying resource container failed: %v", e) + } + } + if e := h.allocDir.UnmountAll(); e != nil { + h.logger.Printf("[ERR] driver.exec: unmounting dev,proc and alloc dirs failed: %v", e) + } + } + h.waitCh <- dstructs.NewWaitResult(ps.ExitCode, ps.Signal, err) + close(h.waitCh) + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.exec: failed to deregister services: %v", err) + } + + if err := h.executor.Exit(); err != nil { + h.logger.Printf("[ERR] driver.exec: error destroying executor: %v", err) + } + h.pluginClient.Kill() +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/exec_default.go b/vendor/github.com/hashicorp/nomad/client/driver/exec_default.go new file mode 100644 index 000000000..176938726 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/exec_default.go @@ -0,0 +1,12 @@ +//+build darwin dragonfly freebsd netbsd openbsd solaris windows + +package driver + +import ( + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + return false, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/exec_linux.go b/vendor/github.com/hashicorp/nomad/client/driver/exec_linux.go new file mode 100644 index 000000000..6c43b5119 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/exec_linux.go @@ -0,0 +1,34 @@ +package driver + +import ( + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" + "golang.org/x/sys/unix" +) + +func (d *ExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[execDriverAttr] + + // Only enable if cgroups are available and we are root + if _, ok := node.Attributes["unique.cgroup.mountpoint"]; !ok { + if currentlyEnabled { + d.logger.Printf("[DEBUG] driver.exec: cgroups unavailable, disabling") + } + delete(node.Attributes, execDriverAttr) + return false, nil + } else if unix.Geteuid() != 0 { + if currentlyEnabled { + d.logger.Printf("[DEBUG] driver.exec: must run as root user, disabling") + } + delete(node.Attributes, execDriverAttr) + return false, nil + } + + if !currentlyEnabled { + d.logger.Printf("[DEBUG] driver.exec: exec driver is enabled") + } + node.Attributes[execDriverAttr] = "1" + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/checks.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks.go new file mode 100644 index 000000000..12064023f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks.go @@ -0,0 +1,206 @@ +package executor + +import ( + "fmt" + "log" + "os/exec" + "sync" + "syscall" + "time" + + "github.com/armon/circbuf" + docker "github.com/fsouza/go-dockerclient" + cstructs "github.com/hashicorp/nomad/client/driver/structs" +) + +var ( + // We store the client globally to cache the connection to the docker daemon. 
+ createClient sync.Once + client *docker.Client +) + +const ( + // The default check timeout + defaultCheckTimeout = 30 * time.Second +) + +// DockerScriptCheck runs nagios compatible scripts in a docker container and +// provides the check result +type DockerScriptCheck struct { + id string // id of the check + interval time.Duration // interval of the check + timeout time.Duration // timeout of the check + containerID string // container id in which the check will be invoked + logger *log.Logger + cmd string // check command + args []string // check command arguments + + dockerEndpoint string // docker endpoint + tlsCert string // path to tls certificate + tlsCa string // path to tls ca + tlsKey string // path to tls key +} + +// dockerClient creates the client to interact with the docker daemon +func (d *DockerScriptCheck) dockerClient() (*docker.Client, error) { + if client != nil { + return client, nil + } + + var err error + createClient.Do(func() { + if d.dockerEndpoint != "" { + if d.tlsCert+d.tlsKey+d.tlsCa != "" { + d.logger.Printf("[DEBUG] executor.checks: using TLS client connection to %s", d.dockerEndpoint) + client, err = docker.NewTLSClient(d.dockerEndpoint, d.tlsCert, d.tlsKey, d.tlsCa) + } else { + d.logger.Printf("[DEBUG] executor.checks: using standard client connection to %s", d.dockerEndpoint) + client, err = docker.NewClient(d.dockerEndpoint) + } + return + } + + d.logger.Println("[DEBUG] executor.checks: using client connection initialized from environment") + client, err = docker.NewClientFromEnv() + }) + return client, err +} + +// Run runs a script check inside a docker container +func (d *DockerScriptCheck) Run() *cstructs.CheckResult { + var ( + exec *docker.Exec + err error + execRes *docker.ExecInspect + time = time.Now() + ) + + if client, err = d.dockerClient(); err != nil { + return &cstructs.CheckResult{Err: err} + } + client = client + execOpts := docker.CreateExecOptions{ + AttachStdin: false, + AttachStdout: true, + AttachStderr: true, + Tty: false, + Cmd: append([]string{d.cmd}, d.args...), + Container: d.containerID, + } + if exec, err = client.CreateExec(execOpts); err != nil { + return &cstructs.CheckResult{Err: err} + } + + output, _ := circbuf.NewBuffer(int64(cstructs.CheckBufSize)) + startOpts := docker.StartExecOptions{ + Detach: false, + Tty: false, + OutputStream: output, + ErrorStream: output, + } + + if err = client.StartExec(exec.ID, startOpts); err != nil { + return &cstructs.CheckResult{Err: err} + } + if execRes, err = client.InspectExec(exec.ID); err != nil { + return &cstructs.CheckResult{Err: err} + } + return &cstructs.CheckResult{ + ExitCode: execRes.ExitCode, + Output: string(output.Bytes()), + Timestamp: time, + } +} + +// ID returns the check id +func (d *DockerScriptCheck) ID() string { + return d.id +} + +// Interval returns the interval at which the check has to run +func (d *DockerScriptCheck) Interval() time.Duration { + return d.interval +} + +// Timeout returns the duration after which a check is timed out. 
+func (d *DockerScriptCheck) Timeout() time.Duration { + if d.timeout == 0 { + return defaultCheckTimeout + } + return d.timeout +} + +// ExecScriptCheck runs a nagios compatible script and returns the check result +type ExecScriptCheck struct { + id string // id of the script check + interval time.Duration // interval at which the check is invoked + timeout time.Duration // timeout duration of the check + cmd string // command of the check + args []string // args passed to the check + taskDir string // the root directory of the check + + FSIsolation bool // indicates whether the check has to be run within a chroot +} + +// Run runs an exec script check +func (e *ExecScriptCheck) Run() *cstructs.CheckResult { + buf, _ := circbuf.NewBuffer(int64(cstructs.CheckBufSize)) + cmd := exec.Command(e.cmd, e.args...) + cmd.Stdout = buf + cmd.Stderr = buf + e.setChroot(cmd) + ts := time.Now() + if err := cmd.Start(); err != nil { + return &cstructs.CheckResult{Err: err} + } + errCh := make(chan error, 2) + go func() { + errCh <- cmd.Wait() + }() + for { + select { + case err := <-errCh: + endTime := time.Now() + if err == nil { + return &cstructs.CheckResult{ + ExitCode: 0, + Output: string(buf.Bytes()), + Timestamp: ts, + } + } + exitCode := 1 + if exitErr, ok := err.(*exec.ExitError); ok { + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitCode = status.ExitStatus() + } + } + return &cstructs.CheckResult{ + ExitCode: exitCode, + Output: string(buf.Bytes()), + Timestamp: ts, + Duration: endTime.Sub(ts), + } + case <-time.After(e.Timeout()): + errCh <- fmt.Errorf("timed out after waiting 30s") + } + } + return nil +} + +// ID returns the check id +func (e *ExecScriptCheck) ID() string { + return e.id +} + +// Interval returns the interval at which the check has to run +func (e *ExecScriptCheck) Interval() time.Duration { + return e.interval +} + +// Timeout returns the duration after which a check is timed out. 
+func (e *ExecScriptCheck) Timeout() time.Duration { + if e.timeout == 0 { + return defaultCheckTimeout + } + return e.timeout +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_unix.go new file mode 100644 index 000000000..b18812dd8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_unix.go @@ -0,0 +1,18 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package executor + +import ( + "os/exec" + "syscall" +) + +func (e *ExecScriptCheck) setChroot(cmd *exec.Cmd) { + if e.FSIsolation { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Chroot = e.taskDir + } + cmd.Dir = "/" +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_windows.go new file mode 100644 index 000000000..a35c2722d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/checks_windows.go @@ -0,0 +1,8 @@ +// +build windows + +package executor + +import "os/exec" + +func (e *ExecScriptCheck) setChroot(cmd *exec.Cmd) { +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/executor.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor.go new file mode 100644 index 000000000..7fc973e63 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor.go @@ -0,0 +1,856 @@ +package executor + +import ( + "fmt" + "io/ioutil" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "syscall" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/go-ps" + "github.com/shirou/gopsutil/process" + + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/driver/env" + "github.com/hashicorp/nomad/client/driver/logging" + "github.com/hashicorp/nomad/client/stats" + "github.com/hashicorp/nomad/command/agent/consul" + shelpers "github.com/hashicorp/nomad/helper/stats" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/nomad/structs/config" + + dstructs "github.com/hashicorp/nomad/client/driver/structs" + cstructs "github.com/hashicorp/nomad/client/structs" +) + +const ( + // pidScanInterval is the interval at which the executor scans the process + // tree for finding out the pids that the executor and it's child processes + // have forked + pidScanInterval = 5 * time.Second +) + +var ( + // The statistics the basic executor exposes + ExecutorBasicMeasuredMemStats = []string{"RSS", "Swap"} + ExecutorBasicMeasuredCpuStats = []string{"System Mode", "User Mode", "Percent"} +) + +// Executor is the interface which allows a driver to launch and supervise +// a process +type Executor interface { + LaunchCmd(command *ExecCommand, ctx *ExecutorContext) (*ProcessState, error) + LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error) + Wait() (*ProcessState, error) + ShutDown() error + Exit() error + UpdateLogConfig(logConfig *structs.LogConfig) error + UpdateTask(task *structs.Task) error + SyncServices(ctx *ConsulContext) error + DeregisterServices() error + Version() (*ExecutorVersion, error) + Stats() (*cstructs.TaskResourceUsage, error) +} + +// ConsulContext holds context to configure the Consul client and run checks +type ConsulContext struct { + // ConsulConfig contains the configuration information for talking + // with this Nomad Agent's 
Consul Agent. + ConsulConfig *config.ConsulConfig + + // ContainerID is the ID of the container + ContainerID string + + // TLSCert is the cert which docker client uses while interactng with the docker + // daemon over TLS + TLSCert string + + // TLSCa is the CA which the docker client uses while interacting with the docker + // daeemon over TLS + TLSCa string + + // TLSKey is the TLS key which the docker client uses while interacting with + // the docker daemon + TLSKey string + + // DockerEndpoint is the endpoint of the docker daemon + DockerEndpoint string +} + +// ExecutorContext holds context to configure the command user +// wants to run and isolate it +type ExecutorContext struct { + // TaskEnv holds information about the environment of a Task + TaskEnv *env.TaskEnvironment + + // AllocDir is the handle to do operations on the alloc dir of + // the task + AllocDir *allocdir.AllocDir + + // Task is the task whose executor is being launched + Task *structs.Task + + // AllocID is the allocation id to which the task belongs + AllocID string + + // A mapping of directories on the host OS to attempt to embed inside each + // task's chroot. + ChrootEnv map[string]string + + // Driver is the name of the driver that invoked the executor + Driver string + + // PortUpperBound is the upper bound of the ports that we can use to start + // the syslog server + PortUpperBound uint + + // PortLowerBound is the lower bound of the ports that we can use to start + // the syslog server + PortLowerBound uint +} + +// ExecCommand holds the user command, args, and other isolation related +// settings. +type ExecCommand struct { + // Cmd is the command that the user wants to run. + Cmd string + + // Args is the args of the command that the user wants to run. + Args []string + + // FSIsolation determines whether the command would be run in a chroot. + FSIsolation bool + + // User is the user which the executor uses to run the command. + User string + + // ResourceLimits determines whether resource limits are enforced by the + // executor. + ResourceLimits bool +} + +// ProcessState holds information about the state of a user process. +type ProcessState struct { + Pid int + ExitCode int + Signal int + IsolationConfig *dstructs.IsolationConfig + Time time.Time +} + +// nomadPid holds a pid and it's cpu percentage calculator +type nomadPid struct { + pid int + cpuStatsTotal *stats.CpuStats + cpuStatsUser *stats.CpuStats + cpuStatsSys *stats.CpuStats +} + +// SyslogServerState holds the address and islation information of a launched +// syslog server +type SyslogServerState struct { + IsolationConfig *dstructs.IsolationConfig + Addr string +} + +// ExecutorVersion is the version of the executor +type ExecutorVersion struct { + Version string +} + +func (v *ExecutorVersion) GoString() string { + return v.Version +} + +// UniversalExecutor is an implementation of the Executor which launches and +// supervises processes. 
In addition to process supervision it provides resource +// and file system isolation +type UniversalExecutor struct { + cmd exec.Cmd + ctx *ExecutorContext + command *ExecCommand + + pids map[int]*nomadPid + pidLock sync.RWMutex + taskDir string + exitState *ProcessState + processExited chan interface{} + fsIsolationEnforced bool + + lre *logging.FileRotator + lro *logging.FileRotator + rotatorLock sync.Mutex + + shutdownCh chan struct{} + + syslogServer *logging.SyslogServer + syslogChan chan *logging.SyslogMessage + + resConCtx resourceContainerContext + + consulSyncer *consul.Syncer + consulCtx *ConsulContext + totalCpuStats *stats.CpuStats + userCpuStats *stats.CpuStats + systemCpuStats *stats.CpuStats + logger *log.Logger +} + +// NewExecutor returns an Executor +func NewExecutor(logger *log.Logger) Executor { + if err := shelpers.Init(); err != nil { + logger.Printf("[FATAL] executor: unable to initialize stats: %v", err) + return nil + } + + exec := &UniversalExecutor{ + logger: logger, + processExited: make(chan interface{}), + totalCpuStats: stats.NewCpuStats(), + userCpuStats: stats.NewCpuStats(), + systemCpuStats: stats.NewCpuStats(), + pids: make(map[int]*nomadPid), + } + + return exec +} + +// Version returns the api version of the executor +func (e *UniversalExecutor) Version() (*ExecutorVersion, error) { + return &ExecutorVersion{Version: "1.0.0"}, nil +} + +// LaunchCmd launches a process and returns it's state. It also configures an +// applies isolation on certain platforms. +func (e *UniversalExecutor) LaunchCmd(command *ExecCommand, ctx *ExecutorContext) (*ProcessState, error) { + e.logger.Printf("[DEBUG] executor: launching command %v %v", command.Cmd, strings.Join(command.Args, " ")) + + e.ctx = ctx + e.command = command + + // setting the user of the process + if command.User != "" { + e.logger.Printf("[DEBUG] executor: running command as %s", command.User) + if err := e.runAs(command.User); err != nil { + return nil, err + } + } + + // configuring the task dir + if err := e.configureTaskDir(); err != nil { + return nil, err + } + + e.ctx.TaskEnv.Build() + // configuring the chroot, resource container, and start the plugin + // process in the chroot. + if err := e.configureIsolation(); err != nil { + return nil, err + } + // Apply ourselves into the resource container. The executor MUST be in + // the resource container before the user task is started, otherwise we + // are subject to a fork attack in which a process escapes isolation by + // immediately forking. + if err := e.applyLimits(os.Getpid()); err != nil { + return nil, err + } + + // Setup the loggers + if err := e.configureLoggers(); err != nil { + return nil, err + } + e.cmd.Stdout = e.lro + e.cmd.Stderr = e.lre + + // Look up the binary path and make it executable + absPath, err := e.lookupBin(ctx.TaskEnv.ReplaceEnv(command.Cmd)) + if err != nil { + return nil, err + } + + if err := e.makeExecutable(absPath); err != nil { + return nil, err + } + + path := absPath + + // Determine the path to run as it may have to be relative to the chroot. + if e.fsIsolationEnforced { + rel, err := filepath.Rel(e.taskDir, path) + if err != nil { + return nil, err + } + path = rel + } + + // Set the commands arguments + e.cmd.Path = path + e.cmd.Args = append([]string{e.cmd.Path}, ctx.TaskEnv.ParseAndReplace(command.Args)...) 
+ e.cmd.Env = ctx.TaskEnv.EnvList() + + // Start the process + if err := e.cmd.Start(); err != nil { + return nil, err + } + go e.collectPids() + go e.wait() + ic := e.resConCtx.getIsolationConfig() + return &ProcessState{Pid: e.cmd.Process.Pid, ExitCode: -1, IsolationConfig: ic, Time: time.Now()}, nil +} + +// configureLoggers sets up the standard out/error file rotators +func (e *UniversalExecutor) configureLoggers() error { + e.rotatorLock.Lock() + defer e.rotatorLock.Unlock() + + logFileSize := int64(e.ctx.Task.LogConfig.MaxFileSizeMB * 1024 * 1024) + if e.lro == nil { + lro, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", e.ctx.Task.Name), + e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger) + if err != nil { + return err + } + e.lro = lro + } + + if e.lre == nil { + lre, err := logging.NewFileRotator(e.ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", e.ctx.Task.Name), + e.ctx.Task.LogConfig.MaxFiles, logFileSize, e.logger) + if err != nil { + return err + } + e.lre = lre + } + return nil +} + +// Wait waits until a process has exited and returns it's exitcode and errors +func (e *UniversalExecutor) Wait() (*ProcessState, error) { + <-e.processExited + return e.exitState, nil +} + +// COMPAT: prior to Nomad 0.3.2, UpdateTask didn't exist. +// UpdateLogConfig updates the log configuration +func (e *UniversalExecutor) UpdateLogConfig(logConfig *structs.LogConfig) error { + e.ctx.Task.LogConfig = logConfig + if e.lro == nil { + return fmt.Errorf("log rotator for stdout doesn't exist") + } + e.lro.MaxFiles = logConfig.MaxFiles + e.lro.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024) + + if e.lre == nil { + return fmt.Errorf("log rotator for stderr doesn't exist") + } + e.lre.MaxFiles = logConfig.MaxFiles + e.lre.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024) + return nil +} + +func (e *UniversalExecutor) UpdateTask(task *structs.Task) error { + e.ctx.Task = task + + // Updating Log Config + fileSize := int64(task.LogConfig.MaxFileSizeMB * 1024 * 1024) + e.lro.MaxFiles = task.LogConfig.MaxFiles + e.lro.FileSize = fileSize + e.lre.MaxFiles = task.LogConfig.MaxFiles + e.lre.FileSize = fileSize + + // Re-syncing task with Consul agent + if e.consulSyncer != nil { + e.interpolateServices(e.ctx.Task) + domain := consul.NewExecutorDomain(e.ctx.AllocID, task.Name) + serviceMap := generateServiceKeys(e.ctx.AllocID, task.Services) + e.consulSyncer.SetServices(domain, serviceMap) + } + return nil +} + +// generateServiceKeys takes a list of interpolated Nomad Services and returns a map +// of ServiceKeys to Nomad Services. +func generateServiceKeys(allocID string, services []*structs.Service) map[consul.ServiceKey]*structs.Service { + keys := make(map[consul.ServiceKey]*structs.Service, len(services)) + for _, service := range services { + key := consul.GenerateServiceKey(service) + keys[key] = service + } + return keys +} + +func (e *UniversalExecutor) wait() { + defer close(e.processExited) + err := e.cmd.Wait() + ic := e.resConCtx.getIsolationConfig() + if err == nil { + e.exitState = &ProcessState{Pid: 0, ExitCode: 0, IsolationConfig: ic, Time: time.Now()} + return + } + exitCode := 1 + var signal int + if exitErr, ok := err.(*exec.ExitError); ok { + if status, ok := exitErr.Sys().(syscall.WaitStatus); ok { + exitCode = status.ExitStatus() + if status.Signaled() { + // bash(1) uses the lower 7 bits of a uint8 + // to indicate normal program failure (see + // ). 
If a process terminates due + // to a signal, encode the signal number to + // indicate which signal caused the process + // to terminate. Mirror this exit code + // encoding scheme. + const exitSignalBase = 128 + signal = int(status.Signal()) + exitCode = exitSignalBase + signal + } + } + } else { + e.logger.Printf("[DEBUG] executor: unexpected Wait() error type: %v", err) + } + + e.exitState = &ProcessState{Pid: 0, ExitCode: exitCode, Signal: signal, IsolationConfig: ic, Time: time.Now()} +} + +var ( + // finishedErr is the error message received when trying to kill and already + // exited process. + finishedErr = "os: process already finished" +) + +// ClientCleanup is the cleanup routine that a Nomad Client uses to remove the +// reminants of a child UniversalExecutor. +func ClientCleanup(ic *dstructs.IsolationConfig, pid int) error { + return clientCleanup(ic, pid) +} + +// Exit cleans up the alloc directory, destroys resource container and kills the +// user process +func (e *UniversalExecutor) Exit() error { + var merr multierror.Error + if e.syslogServer != nil { + e.syslogServer.Shutdown() + } + e.lre.Close() + e.lro.Close() + + if e.consulSyncer != nil { + e.consulSyncer.Shutdown() + } + + // If the executor did not launch a process, return. + if e.command == nil { + return nil + } + + // Prefer killing the process via the resource container. + if e.cmd.Process != nil && !e.command.ResourceLimits { + proc, err := os.FindProcess(e.cmd.Process.Pid) + if err != nil { + e.logger.Printf("[ERR] executor: can't find process with pid: %v, err: %v", + e.cmd.Process.Pid, err) + } else if err := proc.Kill(); err != nil && err.Error() != finishedErr { + merr.Errors = append(merr.Errors, + fmt.Errorf("can't kill process with pid: %v, err: %v", e.cmd.Process.Pid, err)) + } + } + + if e.command.ResourceLimits { + if err := e.resConCtx.executorCleanup(); err != nil { + merr.Errors = append(merr.Errors, err) + } + } + + if e.command.FSIsolation { + if err := e.removeChrootMounts(); err != nil { + merr.Errors = append(merr.Errors, err) + } + } + return merr.ErrorOrNil() +} + +// Shutdown sends an interrupt signal to the user process +func (e *UniversalExecutor) ShutDown() error { + if e.cmd.Process == nil { + return fmt.Errorf("executor.shutdown error: no process found") + } + proc, err := os.FindProcess(e.cmd.Process.Pid) + if err != nil { + return fmt.Errorf("executor.shutdown failed to find process: %v", err) + } + if runtime.GOOS == "windows" { + if err := proc.Kill(); err != nil && err.Error() != finishedErr { + return err + } + return nil + } + if err = proc.Signal(os.Interrupt); err != nil && err.Error() != finishedErr { + return fmt.Errorf("executor.shutdown error: %v", err) + } + return nil +} + +// SyncServices syncs the services of the task that the executor is running with +// Consul +func (e *UniversalExecutor) SyncServices(ctx *ConsulContext) error { + e.logger.Printf("[INFO] executor: registering services") + e.consulCtx = ctx + if e.consulSyncer == nil { + cs, err := consul.NewSyncer(ctx.ConsulConfig, e.shutdownCh, e.logger) + if err != nil { + return err + } + e.consulSyncer = cs + go e.consulSyncer.Run() + } + e.interpolateServices(e.ctx.Task) + e.consulSyncer.SetDelegatedChecks(e.createCheckMap(), e.createCheck) + e.consulSyncer.SetAddrFinder(e.ctx.Task.FindHostAndPortFor) + domain := consul.NewExecutorDomain(e.ctx.AllocID, e.ctx.Task.Name) + serviceMap := generateServiceKeys(e.ctx.AllocID, e.ctx.Task.Services) + e.consulSyncer.SetServices(domain, serviceMap) + return nil +} + 
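// A minimal sketch of consuming the exit-code convention used by wait()
// above, where a signal-terminated process is reported as 128 + signal
// number (mirroring bash(1)). The decode helper is illustrative and not
// part of the vendored Nomad code.
package main

import "fmt"

// decodeExitCode reports the terminating signal encoded in an exit code,
// if any, using the same 128 base as wait() above.
func decodeExitCode(code int) (signal int, signaled bool) {
	const exitSignalBase = 128
	if code > exitSignalBase {
		return code - exitSignalBase, true
	}
	return 0, false
}

func main() {
	// A task killed by SIGKILL (signal 9) surfaces as exit code 137.
	sig, ok := decodeExitCode(137)
	fmt.Println(sig, ok) // prints: 9 true
}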
+// DeregisterServices removes the services of the task that the executor is +// running from Consul +func (e *UniversalExecutor) DeregisterServices() error { + e.logger.Printf("[INFO] executor: de-registering services and shutting down consul service") + if e.consulSyncer != nil { + return e.consulSyncer.Shutdown() + } + return nil +} + +// pidStats returns the resource usage stats per pid +func (e *UniversalExecutor) pidStats() (map[string]*cstructs.ResourceUsage, error) { + stats := make(map[string]*cstructs.ResourceUsage) + e.pidLock.RLock() + pids := make(map[int]*nomadPid, len(e.pids)) + for k, v := range e.pids { + pids[k] = v + } + e.pidLock.RUnlock() + for pid, np := range pids { + p, err := process.NewProcess(int32(pid)) + if err != nil { + e.logger.Printf("[DEBUG] executor: unable to create new process with pid: %v", pid) + continue + } + ms := &cstructs.MemoryStats{} + if memInfo, err := p.MemoryInfo(); err == nil { + ms.RSS = memInfo.RSS + ms.Swap = memInfo.Swap + ms.Measured = ExecutorBasicMeasuredMemStats + } + + cs := &cstructs.CpuStats{} + if cpuStats, err := p.Times(); err == nil { + cs.SystemMode = np.cpuStatsSys.Percent(cpuStats.System * float64(time.Second)) + cs.UserMode = np.cpuStatsUser.Percent(cpuStats.User * float64(time.Second)) + cs.Measured = ExecutorBasicMeasuredCpuStats + + // calculate cpu usage percent + cs.Percent = np.cpuStatsTotal.Percent(cpuStats.Total() * float64(time.Second)) + } + stats[strconv.Itoa(pid)] = &cstructs.ResourceUsage{MemoryStats: ms, CpuStats: cs} + } + + return stats, nil +} + +// configureTaskDir sets the task dir in the executor +func (e *UniversalExecutor) configureTaskDir() error { + taskDir, ok := e.ctx.AllocDir.TaskDirs[e.ctx.Task.Name] + e.taskDir = taskDir + if !ok { + return fmt.Errorf("couldn't find task directory for task %v", e.ctx.Task.Name) + } + e.cmd.Dir = taskDir + return nil +} + +// lookupBin looks for path to the binary to run by looking for the binary in +// the following locations, in-order: task/local/, task/, based on host $PATH. +// The return path is absolute. +func (e *UniversalExecutor) lookupBin(bin string) (string, error) { + // Check in the local directory + local := filepath.Join(e.taskDir, allocdir.TaskLocal, bin) + if _, err := os.Stat(local); err == nil { + return local, nil + } + + // Check at the root of the task's directory + root := filepath.Join(e.taskDir, bin) + if _, err := os.Stat(root); err == nil { + return root, nil + } + + // Check the $PATH + if host, err := exec.LookPath(bin); err == nil { + return host, nil + } + + return "", fmt.Errorf("binary %q could not be found", bin) +} + +// makeExecutable makes the given file executable for root,group,others. +func (e *UniversalExecutor) makeExecutable(binPath string) error { + if runtime.GOOS == "windows" { + return nil + } + + fi, err := os.Stat(binPath) + if err != nil { + if os.IsNotExist(err) { + return fmt.Errorf("binary %q does not exist", binPath) + } + return fmt.Errorf("specified binary is invalid: %v", err) + } + + // If it is not executable, make it so. 
+ perm := fi.Mode().Perm() + req := os.FileMode(0555) + if perm&req != req { + if err := os.Chmod(binPath, perm|req); err != nil { + return fmt.Errorf("error making %q executable: %s", binPath, err) + } + } + return nil +} + +// getFreePort returns a free port ready to be listened on between upper and +// lower bounds +func (e *UniversalExecutor) getListener(lowerBound uint, upperBound uint) (net.Listener, error) { + if runtime.GOOS == "windows" { + return e.listenerTCP(lowerBound, upperBound) + } + + return e.listenerUnix() +} + +// listenerTCP creates a TCP listener using an unused port between an upper and +// lower bound +func (e *UniversalExecutor) listenerTCP(lowerBound uint, upperBound uint) (net.Listener, error) { + for i := lowerBound; i <= upperBound; i++ { + addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%v", i)) + if err != nil { + return nil, err + } + l, err := net.ListenTCP("tcp", addr) + if err != nil { + continue + } + return l, nil + } + return nil, fmt.Errorf("No free port found") +} + +// listenerUnix creates a Unix domain socket +func (e *UniversalExecutor) listenerUnix() (net.Listener, error) { + f, err := ioutil.TempFile("", "plugin") + if err != nil { + return nil, err + } + path := f.Name() + + if err := f.Close(); err != nil { + return nil, err + } + if err := os.Remove(path); err != nil { + return nil, err + } + + return net.Listen("unix", path) +} + +// createCheckMap creates a map of checks that the executor will handle on it's +// own +func (e *UniversalExecutor) createCheckMap() map[string]struct{} { + checks := map[string]struct{}{ + "script": struct{}{}, + } + return checks +} + +// createCheck creates NomadCheck from a ServiceCheck +func (e *UniversalExecutor) createCheck(check *structs.ServiceCheck, checkID string) (consul.Check, error) { + if check.Type == structs.ServiceCheckScript && e.ctx.Driver == "docker" { + return &DockerScriptCheck{ + id: checkID, + interval: check.Interval, + timeout: check.Timeout, + containerID: e.consulCtx.ContainerID, + logger: e.logger, + cmd: check.Command, + args: check.Args, + }, nil + } + + if check.Type == structs.ServiceCheckScript && (e.ctx.Driver == "exec" || + e.ctx.Driver == "raw_exec" || e.ctx.Driver == "java") { + return &ExecScriptCheck{ + id: checkID, + interval: check.Interval, + timeout: check.Timeout, + cmd: check.Command, + args: check.Args, + taskDir: e.taskDir, + FSIsolation: e.command.FSIsolation, + }, nil + + } + return nil, fmt.Errorf("couldn't create check for %v", check.Name) +} + +// interpolateServices interpolates tags in a service and checks with values from the +// task's environment. 
+func (e *UniversalExecutor) interpolateServices(task *structs.Task) { + e.ctx.TaskEnv.Build() + for _, service := range task.Services { + for _, check := range service.Checks { + if check.Type == structs.ServiceCheckScript { + check.Name = e.ctx.TaskEnv.ReplaceEnv(check.Name) + check.Command = e.ctx.TaskEnv.ReplaceEnv(check.Command) + check.Args = e.ctx.TaskEnv.ParseAndReplace(check.Args) + check.Path = e.ctx.TaskEnv.ReplaceEnv(check.Path) + check.Protocol = e.ctx.TaskEnv.ReplaceEnv(check.Protocol) + } + } + service.Name = e.ctx.TaskEnv.ReplaceEnv(service.Name) + service.Tags = e.ctx.TaskEnv.ParseAndReplace(service.Tags) + } +} + +// collectPids collects the pids of the child processes that the executor is +// running every 5 seconds +func (e *UniversalExecutor) collectPids() { + // Fire the timer right away when the executor starts from there on the pids + // are collected every scan interval + timer := time.NewTimer(0) + defer timer.Stop() + for { + select { + case <-timer.C: + pids, err := e.getAllPids() + if err != nil { + e.logger.Printf("[DEBUG] executor: error collecting pids: %v", err) + } + e.pidLock.Lock() + + // Adding pids which are not being tracked + for pid, np := range pids { + if _, ok := e.pids[pid]; !ok { + e.pids[pid] = np + } + } + // Removing pids which are no longer present + for pid := range e.pids { + if _, ok := pids[pid]; !ok { + delete(e.pids, pid) + } + } + e.pidLock.Unlock() + timer.Reset(pidScanInterval) + case <-e.processExited: + return + } + } +} + +// scanPids scans all the pids on the machine running the current executor and +// returns the child processes of the executor. +func (e *UniversalExecutor) scanPids(parentPid int, allPids []ps.Process) (map[int]*nomadPid, error) { + processFamily := make(map[int]struct{}) + processFamily[parentPid] = struct{}{} + + // A buffer for holding pids which haven't matched with any parent pid + var pidsRemaining []ps.Process + for { + // flag to indicate if we have found a match + foundNewPid := false + + for _, pid := range allPids { + _, childPid := processFamily[pid.PPid()] + + // checking if the pid is a child of any of the parents + if childPid { + processFamily[pid.Pid()] = struct{}{} + foundNewPid = true + } else { + // if it is not, then we add the pid to the buffer + pidsRemaining = append(pidsRemaining, pid) + } + // scan only the pids which are left in the buffer + allPids = pidsRemaining + } + + // not scanning anymore if we couldn't find a single match + if !foundNewPid { + break + } + } + res := make(map[int]*nomadPid) + for pid := range processFamily { + np := nomadPid{ + pid: pid, + cpuStatsTotal: stats.NewCpuStats(), + cpuStatsUser: stats.NewCpuStats(), + cpuStatsSys: stats.NewCpuStats(), + } + res[pid] = &np + } + return res, nil +} + +// aggregatedResourceUsage aggregates the resource usage of all the pids and +// returns a TaskResourceUsage data point +func (e *UniversalExecutor) aggregatedResourceUsage(pidStats map[string]*cstructs.ResourceUsage) *cstructs.TaskResourceUsage { + ts := time.Now().UTC().UnixNano() + var ( + systemModeCPU, userModeCPU, percent float64 + totalRSS, totalSwap uint64 + ) + + for _, pidStat := range pidStats { + systemModeCPU += pidStat.CpuStats.SystemMode + userModeCPU += pidStat.CpuStats.UserMode + percent += pidStat.CpuStats.Percent + + totalRSS += pidStat.MemoryStats.RSS + totalSwap += pidStat.MemoryStats.Swap + } + + totalCPU := &cstructs.CpuStats{ + SystemMode: systemModeCPU, + UserMode: userModeCPU, + Percent: percent, + Measured: ExecutorBasicMeasuredCpuStats, + 
TotalTicks: e.systemCpuStats.TicksConsumed(percent), + } + + totalMemory := &cstructs.MemoryStats{ + RSS: totalRSS, + Swap: totalSwap, + Measured: ExecutorBasicMeasuredMemStats, + } + + resourceUsage := cstructs.ResourceUsage{ + MemoryStats: totalMemory, + CpuStats: totalCPU, + } + return &cstructs.TaskResourceUsage{ + ResourceUsage: &resourceUsage, + Timestamp: ts, + Pids: pidStats, + } +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_basic.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_basic.go new file mode 100644 index 000000000..123ed4703 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_basic.go @@ -0,0 +1,46 @@ +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package executor + +import ( + "os" + + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/mitchellh/go-ps" +) + +func (e *UniversalExecutor) configureChroot() error { + return nil +} + +func (e *UniversalExecutor) removeChrootMounts() error { + return nil +} + +func (e *UniversalExecutor) runAs(userid string) error { + return nil +} + +func (e *UniversalExecutor) applyLimits(pid int) error { + return nil +} + +func (e *UniversalExecutor) configureIsolation() error { + return nil +} + +func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) { + pidStats, err := e.pidStats() + if err != nil { + return nil, err + } + return e.aggregatedResourceUsage(pidStats), nil +} + +func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) { + allProcesses, err := ps.Processes() + if err != nil { + return nil, err + } + return e.scanPids(os.Getpid(), allProcesses) +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_linux.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_linux.go new file mode 100644 index 000000000..c673555cc --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_linux.go @@ -0,0 +1,373 @@ +package executor + +import ( + "fmt" + "os" + "os/user" + "path/filepath" + "strconv" + "strings" + "syscall" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/go-ps" + "github.com/opencontainers/runc/libcontainer/cgroups" + cgroupFs "github.com/opencontainers/runc/libcontainer/cgroups/fs" + cgroupConfig "github.com/opencontainers/runc/libcontainer/configs" + "github.com/opencontainers/runc/libcontainer/system" + + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/stats" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" +) + +var ( + // A mapping of directories on the host OS to attempt to embed inside each + // task's chroot. 
+ chrootEnv = map[string]string{ + "/bin": "/bin", + "/etc": "/etc", + "/lib": "/lib", + "/lib32": "/lib32", + "/lib64": "/lib64", + "/run/resolvconf": "/run/resolvconf", + "/sbin": "/sbin", + "/usr": "/usr", + } + + // clockTicks is the clocks per second of the machine + clockTicks = uint64(system.GetClockTicks()) + + // The statistics the executor exposes when using cgroups + ExecutorCgroupMeasuredMemStats = []string{"RSS", "Cache", "Swap", "Max Usage", "Kernel Usage", "Kernel Max Usage"} + ExecutorCgroupMeasuredCpuStats = []string{"System Mode", "User Mode", "Throttled Periods", "Throttled Time", "Percent"} +) + +// configureIsolation configures chroot and creates cgroups +func (e *UniversalExecutor) configureIsolation() error { + if e.command.FSIsolation { + if err := e.configureChroot(); err != nil { + return err + } + } + + if e.command.ResourceLimits { + if err := e.configureCgroups(e.ctx.Task.Resources); err != nil { + return fmt.Errorf("error creating cgroups: %v", err) + } + } + return nil +} + +// applyLimits puts a process in a pre-configured cgroup +func (e *UniversalExecutor) applyLimits(pid int) error { + if !e.command.ResourceLimits { + return nil + } + + // Entering the process in the cgroup + manager := getCgroupManager(e.resConCtx.groups, nil) + if err := manager.Apply(pid); err != nil { + e.logger.Printf("[ERR] executor: error applying pid to cgroup: %v", err) + if er := e.removeChrootMounts(); er != nil { + e.logger.Printf("[ERR] executor: error removing chroot: %v", er) + } + return err + } + e.resConCtx.cgPaths = manager.GetPaths() + cgConfig := cgroupConfig.Config{Cgroups: e.resConCtx.groups} + if err := manager.Set(&cgConfig); err != nil { + e.logger.Printf("[ERR] executor: error setting cgroup config: %v", err) + if er := DestroyCgroup(e.resConCtx.groups, e.resConCtx.cgPaths, os.Getpid()); er != nil { + e.logger.Printf("[ERR] executor: error destroying cgroup: %v", er) + } + if er := e.removeChrootMounts(); er != nil { + e.logger.Printf("[ERR] executor: error removing chroot: %v", er) + } + return err + } + return nil +} + +// configureCgroups converts a Nomad Resources specification into the equivalent +// cgroup configuration. It returns an error if the resources are invalid. +func (e *UniversalExecutor) configureCgroups(resources *structs.Resources) error { + e.resConCtx.groups = &cgroupConfig.Cgroup{} + e.resConCtx.groups.Resources = &cgroupConfig.Resources{} + cgroupName := structs.GenerateUUID() + e.resConCtx.groups.Path = filepath.Join("/nomad", cgroupName) + + // TODO: verify this is needed for things like network access + e.resConCtx.groups.Resources.AllowAllDevices = true + + if resources.MemoryMB > 0 { + // Total amount of memory allowed to consume + e.resConCtx.groups.Resources.Memory = int64(resources.MemoryMB * 1024 * 1024) + // Disable swap to avoid issues on the machine + e.resConCtx.groups.Resources.MemorySwap = int64(-1) + } + + if resources.CPU < 2 { + return fmt.Errorf("resources.CPU must be equal to or greater than 2: %v", resources.CPU) + } + + // Set the relative CPU shares for this cgroup. + e.resConCtx.groups.Resources.CpuShares = int64(resources.CPU) + + if resources.IOPS != 0 { + // Validate it is in an acceptable range. + if resources.IOPS < 10 || resources.IOPS > 1000 { + return fmt.Errorf("resources.IOPS must be between 10 and 1000: %d", resources.IOPS) + } + + e.resConCtx.groups.Resources.BlkioWeight = uint16(resources.IOPS) + } + + return nil +} + +// Stats reports the resource utilization of the cgroup. 
If there is no resource +// isolation we aggregate the resource utilization of all the pids launched by +// the executor. +func (e *UniversalExecutor) Stats() (*cstructs.TaskResourceUsage, error) { + if !e.command.ResourceLimits { + pidStats, err := e.pidStats() + if err != nil { + return nil, err + } + return e.aggregatedResourceUsage(pidStats), nil + } + ts := time.Now() + manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths) + stats, err := manager.GetStats() + if err != nil { + return nil, err + } + + // Memory Related Stats + swap := stats.MemoryStats.SwapUsage + maxUsage := stats.MemoryStats.Usage.MaxUsage + rss := stats.MemoryStats.Stats["rss"] + cache := stats.MemoryStats.Stats["cache"] + ms := &cstructs.MemoryStats{ + RSS: rss, + Cache: cache, + Swap: swap.Usage, + MaxUsage: maxUsage, + KernelUsage: stats.MemoryStats.KernelUsage.Usage, + KernelMaxUsage: stats.MemoryStats.KernelUsage.MaxUsage, + Measured: ExecutorCgroupMeasuredMemStats, + } + + // CPU Related Stats + totalProcessCPUUsage := float64(stats.CpuStats.CpuUsage.TotalUsage) + userModeTime := float64(stats.CpuStats.CpuUsage.UsageInUsermode) + kernelModeTime := float64(stats.CpuStats.CpuUsage.UsageInKernelmode) + + totalPercent := e.totalCpuStats.Percent(totalProcessCPUUsage) + cs := &cstructs.CpuStats{ + SystemMode: e.systemCpuStats.Percent(kernelModeTime), + UserMode: e.userCpuStats.Percent(userModeTime), + Percent: totalPercent, + ThrottledPeriods: stats.CpuStats.ThrottlingData.ThrottledPeriods, + ThrottledTime: stats.CpuStats.ThrottlingData.ThrottledTime, + TotalTicks: e.systemCpuStats.TicksConsumed(totalPercent), + Measured: ExecutorCgroupMeasuredCpuStats, + } + taskResUsage := cstructs.TaskResourceUsage{ + ResourceUsage: &cstructs.ResourceUsage{ + MemoryStats: ms, + CpuStats: cs, + }, + Timestamp: ts.UTC().UnixNano(), + } + if pidStats, err := e.pidStats(); err == nil { + taskResUsage.Pids = pidStats + } + return &taskResUsage, nil +} + +// runAs takes a user id as a string and looks up the user, and sets the command +// to execute as that user. +func (e *UniversalExecutor) runAs(userid string) error { + u, err := user.Lookup(userid) + if err != nil { + return fmt.Errorf("Failed to identify user %v: %v", userid, err) + } + + // Convert the uid and gid + uid, err := strconv.ParseUint(u.Uid, 10, 32) + if err != nil { + return fmt.Errorf("Unable to convert userid to uint32: %s", err) + } + gid, err := strconv.ParseUint(u.Gid, 10, 32) + if err != nil { + return fmt.Errorf("Unable to convert groupid to uint32: %s", err) + } + + // Set the command to run as that user and group. + if e.cmd.SysProcAttr == nil { + e.cmd.SysProcAttr = &syscall.SysProcAttr{} + } + if e.cmd.SysProcAttr.Credential == nil { + e.cmd.SysProcAttr.Credential = &syscall.Credential{} + } + e.cmd.SysProcAttr.Credential.Uid = uint32(uid) + e.cmd.SysProcAttr.Credential.Gid = uint32(gid) + + return nil +} + +// configureChroot configures a chroot +func (e *UniversalExecutor) configureChroot() error { + allocDir := e.ctx.AllocDir + if err := allocDir.MountSharedDir(e.ctx.Task.Name); err != nil { + return err + } + + chroot := chrootEnv + if len(e.ctx.ChrootEnv) > 0 { + chroot = e.ctx.ChrootEnv + } + + if err := allocDir.Embed(e.ctx.Task.Name, chroot); err != nil { + return err + } + + // Set the tasks AllocDir environment variable. + e.ctx.TaskEnv. + SetAllocDir(filepath.Join("/", allocdir.SharedAllocName)). + SetTaskLocalDir(filepath.Join("/", allocdir.TaskLocal)). 
+ Build() + + if e.cmd.SysProcAttr == nil { + e.cmd.SysProcAttr = &syscall.SysProcAttr{} + } + e.cmd.SysProcAttr.Chroot = e.taskDir + e.cmd.Dir = "/" + + if err := allocDir.MountSpecialDirs(e.taskDir); err != nil { + return err + } + + e.fsIsolationEnforced = true + return nil +} + +// cleanTaskDir is an idempotent operation to clean the task directory and +// should be called when tearing down the task. +func (e *UniversalExecutor) removeChrootMounts() error { + // Prevent a race between Wait/ForceStop + e.resConCtx.cgLock.Lock() + defer e.resConCtx.cgLock.Unlock() + return e.ctx.AllocDir.UnmountAll() +} + +// getAllPids returns the pids of all the processes spun up by the executor. We +// use the libcontainer apis to get the pids when the user is using cgroup +// isolation and we scan the entire process table if the user is not using any +// isolation +func (e *UniversalExecutor) getAllPids() (map[int]*nomadPid, error) { + if e.command.ResourceLimits { + manager := getCgroupManager(e.resConCtx.groups, e.resConCtx.cgPaths) + pids, err := manager.GetAllPids() + if err != nil { + return nil, err + } + np := make(map[int]*nomadPid, len(pids)) + for _, pid := range pids { + np[pid] = &nomadPid{ + pid: pid, + cpuStatsTotal: stats.NewCpuStats(), + cpuStatsSys: stats.NewCpuStats(), + cpuStatsUser: stats.NewCpuStats(), + } + } + return np, nil + } + allProcesses, err := ps.Processes() + if err != nil { + return nil, err + } + return e.scanPids(os.Getpid(), allProcesses) +} + +// destroyCgroup kills all processes in the cgroup and removes the cgroup +// configuration from the host. This function is idempotent. +func DestroyCgroup(groups *cgroupConfig.Cgroup, cgPaths map[string]string, executorPid int) error { + mErrs := new(multierror.Error) + if groups == nil { + return fmt.Errorf("Can't destroy: cgroup configuration empty") + } + + // Move the executor into the global cgroup so that the task specific + // cgroup can be destroyed. + nilGroup := &cgroupConfig.Cgroup{} + nilGroup.Path = "/" + nilGroup.Resources = groups.Resources + nilManager := getCgroupManager(nilGroup, nil) + err := nilManager.Apply(executorPid) + if err != nil && !strings.Contains(err.Error(), "no such process") { + return fmt.Errorf("failed to remove executor pid %d: %v", executorPid, err) + } + + // Freeze the Cgroup so that it can not continue to fork/exec. + manager := getCgroupManager(groups, cgPaths) + err = manager.Freeze(cgroupConfig.Frozen) + if err != nil && !strings.Contains(err.Error(), "no such file or directory") { + return fmt.Errorf("failed to freeze cgroup: %v", err) + } + + var procs []*os.Process + pids, err := manager.GetAllPids() + if err != nil { + multierror.Append(mErrs, fmt.Errorf("error getting pids: %v", err)) + + // Unfreeze the cgroup. + err = manager.Freeze(cgroupConfig.Thawed) + if err != nil && !strings.Contains(err.Error(), "no such file or directory") { + multierror.Append(mErrs, fmt.Errorf("failed to unfreeze cgroup: %v", err)) + } + return mErrs.ErrorOrNil() + } + + // Kill the processes in the cgroup + for _, pid := range pids { + proc, err := os.FindProcess(pid) + if err != nil { + multierror.Append(mErrs, fmt.Errorf("error finding process %v: %v", pid, err)) + continue + } + + procs = append(procs, proc) + if e := proc.Kill(); e != nil { + multierror.Append(mErrs, fmt.Errorf("error killing process %v: %v", pid, e)) + } + } + + // Unfreeze the cgroug so we can wait. 
+ err = manager.Freeze(cgroupConfig.Thawed) + if err != nil && !strings.Contains(err.Error(), "no such file or directory") { + multierror.Append(mErrs, fmt.Errorf("failed to unfreeze cgroup: %v", err)) + } + + // Wait on the killed processes to ensure they are cleaned up. + for _, proc := range procs { + // Don't capture the error because we expect this to fail for + // processes we didn't fork. + proc.Wait() + } + + // Remove the cgroup. + if err := manager.Destroy(); err != nil { + multierror.Append(mErrs, fmt.Errorf("failed to delete the cgroup directories: %v", err)) + } + return mErrs.ErrorOrNil() +} + +// getCgroupManager returns the correct libcontainer cgroup manager. +func getCgroupManager(groups *cgroupConfig.Cgroup, paths map[string]string) cgroups.Manager { + return &cgroupFs.Manager{Cgroups: groups, Paths: paths} +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_unix.go new file mode 100644 index 000000000..7c8ddf724 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_unix.go @@ -0,0 +1,50 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package executor + +import ( + "fmt" + "io" + "log/syslog" + + "github.com/hashicorp/nomad/client/driver/logging" +) + +func (e *UniversalExecutor) LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error) { + e.ctx = ctx + + // configuring the task dir + if err := e.configureTaskDir(); err != nil { + return nil, err + } + + e.syslogChan = make(chan *logging.SyslogMessage, 2048) + l, err := e.getListener(e.ctx.PortLowerBound, e.ctx.PortUpperBound) + if err != nil { + return nil, err + } + e.logger.Printf("[DEBUG] sylog-server: launching syslog server on addr: %v", l.Addr().String()) + if err := e.configureLoggers(); err != nil { + return nil, err + } + + e.syslogServer = logging.NewSyslogServer(l, e.syslogChan, e.logger) + go e.syslogServer.Start() + go e.collectLogs(e.lre, e.lro) + syslogAddr := fmt.Sprintf("%s://%s", l.Addr().Network(), l.Addr().String()) + return &SyslogServerState{Addr: syslogAddr}, nil +} + +func (e *UniversalExecutor) collectLogs(we io.Writer, wo io.Writer) { + for logParts := range e.syslogChan { + // If the severity of the log line is err then we write to stderr + // otherwise all messages go to stdout + if logParts.Severity == syslog.LOG_ERR { + e.lre.Write(logParts.Message) + e.lre.Write([]byte{'\n'}) + } else { + e.lro.Write(logParts.Message) + e.lro.Write([]byte{'\n'}) + } + } +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_windows.go new file mode 100644 index 000000000..e93f936e7 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/executor_windows.go @@ -0,0 +1,5 @@ +package executor + +func (e *UniversalExecutor) LaunchSyslogServer(ctx *ExecutorContext) (*SyslogServerState, error) { + return nil, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_default.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_default.go new file mode 100644 index 000000000..6e9503206 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_default.go @@ -0,0 +1,24 @@ +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package executor + +import ( + dstructs 
"github.com/hashicorp/nomad/client/driver/structs" +) + +// resourceContainerContext is a platform-specific struct for managing a +// resource container. +type resourceContainerContext struct { +} + +func clientCleanup(ic *dstructs.IsolationConfig, pid int) error { + return nil +} + +func (rc *resourceContainerContext) executorCleanup() error { + return nil +} + +func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_linux.go b/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_linux.go new file mode 100644 index 000000000..ad57de03f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor/resource_container_linux.go @@ -0,0 +1,42 @@ +package executor + +import ( + "os" + "sync" + + dstructs "github.com/hashicorp/nomad/client/driver/structs" + cgroupConfig "github.com/opencontainers/runc/libcontainer/configs" +) + +// resourceContainerContext is a platform-specific struct for managing a +// resource container. In the case of Linux, this is used to control Cgroups. +type resourceContainerContext struct { + groups *cgroupConfig.Cgroup + cgPaths map[string]string + cgLock sync.Mutex +} + +// clientCleanup remoevs this host's Cgroup from the Nomad Client's context +func clientCleanup(ic *dstructs.IsolationConfig, pid int) error { + if err := DestroyCgroup(ic.Cgroup, ic.CgroupPaths, pid); err != nil { + return err + } + return nil +} + +// cleanup removes this host's Cgroup from within an Executor's context +func (rc *resourceContainerContext) executorCleanup() error { + rc.cgLock.Lock() + defer rc.cgLock.Unlock() + if err := DestroyCgroup(rc.groups, rc.cgPaths, os.Getpid()); err != nil { + return err + } + return nil +} + +func (rc *resourceContainerContext) getIsolationConfig() *dstructs.IsolationConfig { + return &dstructs.IsolationConfig{ + Cgroup: rc.groups, + CgroupPaths: rc.cgPaths, + } +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/executor_plugin.go b/vendor/github.com/hashicorp/nomad/client/driver/executor_plugin.go new file mode 100644 index 000000000..1fc9d7e45 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/executor_plugin.go @@ -0,0 +1,181 @@ +package driver + +import ( + "encoding/gob" + "log" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/driver/executor" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/nomad/structs" +) + +// Registering these types since we have to serialize and de-serialize the Task +// structs over the wire between drivers and the executor. 
+func init() { + gob.Register([]interface{}{}) + gob.Register(map[string]interface{}{}) + gob.Register([]map[string]string{}) + gob.Register([]map[string]int{}) +} + +type ExecutorRPC struct { + client *rpc.Client + logger *log.Logger +} + +// LaunchCmdArgs wraps a user command and the args for the purposes of RPC +type LaunchCmdArgs struct { + Cmd *executor.ExecCommand + Ctx *executor.ExecutorContext +} + +// LaunchSyslogServerArgs wraps the executor context for the purposes of RPC +type LaunchSyslogServerArgs struct { + Ctx *executor.ExecutorContext +} + +// SyncServicesArgs wraps the consul context for the purposes of RPC +type SyncServicesArgs struct { + Ctx *executor.ConsulContext +} + +func (e *ExecutorRPC) LaunchCmd(cmd *executor.ExecCommand, ctx *executor.ExecutorContext) (*executor.ProcessState, error) { + var ps *executor.ProcessState + err := e.client.Call("Plugin.LaunchCmd", LaunchCmdArgs{Cmd: cmd, Ctx: ctx}, &ps) + return ps, err +} + +func (e *ExecutorRPC) LaunchSyslogServer(ctx *executor.ExecutorContext) (*executor.SyslogServerState, error) { + var ss *executor.SyslogServerState + err := e.client.Call("Plugin.LaunchSyslogServer", LaunchSyslogServerArgs{Ctx: ctx}, &ss) + return ss, err +} + +func (e *ExecutorRPC) Wait() (*executor.ProcessState, error) { + var ps executor.ProcessState + err := e.client.Call("Plugin.Wait", new(interface{}), &ps) + return &ps, err +} + +func (e *ExecutorRPC) ShutDown() error { + return e.client.Call("Plugin.ShutDown", new(interface{}), new(interface{})) +} + +func (e *ExecutorRPC) Exit() error { + return e.client.Call("Plugin.Exit", new(interface{}), new(interface{})) +} + +func (e *ExecutorRPC) UpdateLogConfig(logConfig *structs.LogConfig) error { + return e.client.Call("Plugin.UpdateLogConfig", logConfig, new(interface{})) +} + +func (e *ExecutorRPC) UpdateTask(task *structs.Task) error { + return e.client.Call("Plugin.UpdateTask", task, new(interface{})) +} + +func (e *ExecutorRPC) SyncServices(ctx *executor.ConsulContext) error { + return e.client.Call("Plugin.SyncServices", SyncServicesArgs{Ctx: ctx}, new(interface{})) +} + +func (e *ExecutorRPC) DeregisterServices() error { + return e.client.Call("Plugin.DeregisterServices", new(interface{}), new(interface{})) +} + +func (e *ExecutorRPC) Version() (*executor.ExecutorVersion, error) { + var version executor.ExecutorVersion + err := e.client.Call("Plugin.Version", new(interface{}), &version) + return &version, err +} + +func (e *ExecutorRPC) Stats() (*cstructs.TaskResourceUsage, error) { + var resourceUsage cstructs.TaskResourceUsage + err := e.client.Call("Plugin.Stats", new(interface{}), &resourceUsage) + return &resourceUsage, err +} + +type ExecutorRPCServer struct { + Impl executor.Executor + logger *log.Logger +} + +func (e *ExecutorRPCServer) LaunchCmd(args LaunchCmdArgs, ps *executor.ProcessState) error { + state, err := e.Impl.LaunchCmd(args.Cmd, args.Ctx) + if state != nil { + *ps = *state + } + return err +} + +func (e *ExecutorRPCServer) LaunchSyslogServer(args LaunchSyslogServerArgs, ss *executor.SyslogServerState) error { + state, err := e.Impl.LaunchSyslogServer(args.Ctx) + if state != nil { + *ss = *state + } + return err +} + +func (e *ExecutorRPCServer) Wait(args interface{}, ps *executor.ProcessState) error { + state, err := e.Impl.Wait() + if state != nil { + *ps = *state + } + return err +} + +func (e *ExecutorRPCServer) ShutDown(args interface{}, resp *interface{}) error { + return e.Impl.ShutDown() +} + +func (e *ExecutorRPCServer) Exit(args interface{}, resp 
*interface{}) error { + return e.Impl.Exit() +} + +func (e *ExecutorRPCServer) UpdateLogConfig(args *structs.LogConfig, resp *interface{}) error { + return e.Impl.UpdateLogConfig(args) +} + +func (e *ExecutorRPCServer) UpdateTask(args *structs.Task, resp *interface{}) error { + return e.Impl.UpdateTask(args) +} + +func (e *ExecutorRPCServer) SyncServices(args SyncServicesArgs, resp *interface{}) error { + return e.Impl.SyncServices(args.Ctx) +} + +func (e *ExecutorRPCServer) DeregisterServices(args interface{}, resp *interface{}) error { + return e.Impl.DeregisterServices() +} + +func (e *ExecutorRPCServer) Version(args interface{}, version *executor.ExecutorVersion) error { + ver, err := e.Impl.Version() + if ver != nil { + *version = *ver + } + return err +} + +func (e *ExecutorRPCServer) Stats(args interface{}, resourceUsage *cstructs.TaskResourceUsage) error { + ru, err := e.Impl.Stats() + if ru != nil { + *resourceUsage = *ru + } + return err +} + +type ExecutorPlugin struct { + logger *log.Logger + Impl *ExecutorRPCServer +} + +func (p *ExecutorPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + if p.Impl == nil { + p.Impl = &ExecutorRPCServer{Impl: executor.NewExecutor(p.logger), logger: p.logger} + } + return p.Impl, nil +} + +func (p *ExecutorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &ExecutorRPC{client: c, logger: p.logger}, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/java.go b/vendor/github.com/hashicorp/nomad/client/driver/java.go new file mode 100644 index 000000000..24e6ce6a8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/java.go @@ -0,0 +1,416 @@ +package driver + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "os/exec" + "path/filepath" + "runtime" + "strings" + "syscall" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-plugin" + "github.com/mitchellh/mapstructure" + + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + "github.com/hashicorp/nomad/client/fingerprint" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + "github.com/hashicorp/nomad/nomad/structs" +) + +const ( + // The key populated in Node Attributes to indicate presence of the Java + // driver + javaDriverAttr = "driver.java" +) + +// JavaDriver is a simple driver to execute applications packaged in Jars. +// It literally just fork/execs tasks with the java command. 
+type JavaDriver struct { + DriverContext + fingerprint.StaticFingerprinter +} + +type JavaDriverConfig struct { + JarPath string `mapstructure:"jar_path"` + JvmOpts []string `mapstructure:"jvm_options"` + Args []string `mapstructure:"args"` +} + +// javaHandle is returned from Start/Open as a handle to the PID +type javaHandle struct { + pluginClient *plugin.Client + userPid int + executor executor.Executor + isolationConfig *dstructs.IsolationConfig + + taskDir string + allocDir *allocdir.AllocDir + killTimeout time.Duration + maxKillTimeout time.Duration + version string + logger *log.Logger + waitCh chan *dstructs.WaitResult + doneCh chan struct{} +} + +// NewJavaDriver is used to create a new exec driver +func NewJavaDriver(ctx *DriverContext) Driver { + return &JavaDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *JavaDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "jar_path": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "jvm_options": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +func (d *JavaDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[javaDriverAttr] + + // Only enable if we are root and cgroups are mounted when running on linux systems. + if runtime.GOOS == "linux" && (syscall.Geteuid() != 0 || !d.cgroupsMounted(node)) { + if currentlyEnabled { + d.logger.Printf("[DEBUG] driver.java: root priviledges and mounted cgroups required on linux, disabling") + } + delete(node.Attributes, "driver.java") + return false, nil + } + + // Find java version + var out bytes.Buffer + var erOut bytes.Buffer + cmd := exec.Command("java", "-version") + cmd.Stdout = &out + cmd.Stderr = &erOut + err := cmd.Run() + if err != nil { + // assume Java wasn't found + delete(node.Attributes, javaDriverAttr) + return false, nil + } + + // 'java -version' returns output on Stderr typically. 
+ // Check stdout, but it's probably empty + var infoString string + if out.String() != "" { + infoString = out.String() + } + + if erOut.String() != "" { + infoString = erOut.String() + } + + if infoString == "" { + if currentlyEnabled { + d.logger.Println("[WARN] driver.java: error parsing Java version information, aborting") + } + delete(node.Attributes, javaDriverAttr) + return false, nil + } + + // Assume 'java -version' returns 3 lines: + // java version "1.6.0_36" + // OpenJDK Runtime Environment (IcedTea6 1.13.8) (6b36-1.13.8-0ubuntu1~12.04) + // OpenJDK 64-Bit Server VM (build 23.25-b01, mixed mode) + // Each line is terminated by \n + info := strings.Split(infoString, "\n") + versionString := info[0] + versionString = strings.TrimPrefix(versionString, "java version ") + versionString = strings.Trim(versionString, "\"") + node.Attributes[javaDriverAttr] = "1" + node.Attributes["driver.java.version"] = versionString + node.Attributes["driver.java.runtime"] = info[1] + node.Attributes["driver.java.vm"] = info[2] + + return true, nil +} + +func (d *JavaDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + var driverConfig JavaDriverConfig + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + + // Set the host environment variables. + filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",") + d.taskEnv.AppendHostEnvvars(filter) + + taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + if driverConfig.JarPath == "" { + return nil, fmt.Errorf("jar_path must be specified") + } + + args := []string{} + // Look for jvm options + if len(driverConfig.JvmOpts) != 0 { + d.logger.Printf("[DEBUG] driver.java: found JVM options: %s", driverConfig.JvmOpts) + args = append(args, driverConfig.JvmOpts...) + } + + // Build the argument list. + args = append(args, "-jar", driverConfig.JarPath) + if len(driverConfig.Args) != 0 { + args = append(args, driverConfig.Args...) 
+ } + + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Driver: "java", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + ChrootEnv: d.config.ChrootEnv, + Task: task, + } + + absPath, err := GetAbsolutePath("java") + if err != nil { + return nil, err + } + + ps, err := execIntf.LaunchCmd(&executor.ExecCommand{ + Cmd: absPath, + Args: args, + FSIsolation: true, + ResourceLimits: true, + User: getExecutorUser(task), + }, executorCtx) + if err != nil { + pluginClient.Kill() + return nil, err + } + d.logger.Printf("[DEBUG] driver.java: started process with pid: %v", ps.Pid) + + // Return a driver handle + maxKill := d.DriverContext.config.MaxKillTimeout + h := &javaHandle{ + pluginClient: pluginClient, + executor: execIntf, + userPid: ps.Pid, + isolationConfig: ps.IsolationConfig, + taskDir: taskDir, + allocDir: ctx.AllocDir, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + version: d.config.Version, + logger: d.logger, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + d.logger.Printf("[ERR] driver.java: error registering services with consul for task: %q: %v", task.Name, err) + } + go h.run() + return h, nil +} + +// cgroupsMounted returns true if the cgroups are mounted on a system otherwise +// returns false +func (d *JavaDriver) cgroupsMounted(node *structs.Node) bool { + _, ok := node.Attributes["unique.cgroup.mountpoint"] + return ok +} + +type javaId struct { + Version string + KillTimeout time.Duration + MaxKillTimeout time.Duration + PluginConfig *PluginReattachConfig + IsolationConfig *dstructs.IsolationConfig + TaskDir string + AllocDir *allocdir.AllocDir + UserPid int +} + +func (d *JavaDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + id := &javaId{} + if err := json.Unmarshal([]byte(handleID), id); err != nil { + return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err) + } + + pluginConfig := &plugin.ClientConfig{ + Reattach: id.PluginConfig.PluginConfig(), + } + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + merrs := new(multierror.Error) + merrs.Errors = append(merrs.Errors, err) + d.logger.Println("[ERR] driver.java: error connecting to plugin so destroying plugin pid and user pid") + if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil { + merrs.Errors = append(merrs.Errors, fmt.Errorf("error destroying plugin and userpid: %v", e)) + } + if id.IsolationConfig != nil { + ePid := pluginConfig.Reattach.Pid + if e := executor.ClientCleanup(id.IsolationConfig, ePid); e != nil { + merrs.Errors = append(merrs.Errors, fmt.Errorf("destroying resource container failed: %v", e)) + } + } + if e := ctx.AllocDir.UnmountAll(); e != nil { + merrs.Errors = append(merrs.Errors, e) + } + + return nil, fmt.Errorf("error connecting to plugin: %v", merrs.ErrorOrNil()) + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.java: version of executor: %v", ver.Version) + 
+ // Return a driver handle + h := &javaHandle{ + pluginClient: pluginClient, + executor: exec, + userPid: id.UserPid, + isolationConfig: id.IsolationConfig, + taskDir: id.TaskDir, + allocDir: id.AllocDir, + logger: d.logger, + version: id.Version, + killTimeout: id.KillTimeout, + maxKillTimeout: id.MaxKillTimeout, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + d.logger.Printf("[ERR] driver.java: error registering services with consul: %v", err) + } + + go h.run() + return h, nil +} + +func (h *javaHandle) ID() string { + id := javaId{ + Version: h.version, + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + UserPid: h.userPid, + TaskDir: h.taskDir, + AllocDir: h.allocDir, + IsolationConfig: h.isolationConfig, + } + + data, err := json.Marshal(id) + if err != nil { + h.logger.Printf("[ERR] driver.java: failed to marshal ID to JSON: %s", err) + } + return string(data) +} + +func (h *javaHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *javaHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + h.executor.UpdateTask(task) + + // Update is not possible + return nil +} + +func (h *javaHandle) Kill() error { + if err := h.executor.ShutDown(); err != nil { + if h.pluginClient.Exited() { + return nil + } + return fmt.Errorf("executor Shutdown failed: %v", err) + } + + select { + case <-h.doneCh: + return nil + case <-time.After(h.killTimeout): + if h.pluginClient.Exited() { + return nil + } + if err := h.executor.Exit(); err != nil { + return fmt.Errorf("executor Exit failed: %v", err) + } + + return nil + } +} + +func (h *javaHandle) Stats() (*cstructs.TaskResourceUsage, error) { + return h.executor.Stats() +} + +func (h *javaHandle) run() { + ps, err := h.executor.Wait() + close(h.doneCh) + if ps.ExitCode == 0 && err != nil { + if h.isolationConfig != nil { + ePid := h.pluginClient.ReattachConfig().Pid + if e := executor.ClientCleanup(h.isolationConfig, ePid); e != nil { + h.logger.Printf("[ERR] driver.java: destroying resource container failed: %v", e) + } + } else { + if e := killProcess(h.userPid); e != nil { + h.logger.Printf("[ERR] driver.java: error killing user process: %v", e) + } + } + if e := h.allocDir.UnmountAll(); e != nil { + h.logger.Printf("[ERR] driver.java: unmounting dev,proc and alloc dirs failed: %v", e) + } + } + h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err} + close(h.waitCh) + + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.java: failed to kill the deregister services: %v", err) + } + + h.executor.Exit() + h.pluginClient.Kill() +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/collector_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/collector_windows.go new file mode 100644 index 000000000..a3a3c1169 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/collector_windows.go @@ -0,0 +1,71 @@ +package logging + +import ( + "log" + + "github.com/hashicorp/nomad/client/allocdir" + cstructs "github.com/hashicorp/nomad/client/driver/structs" + "github.com/hashicorp/nomad/nomad/structs" +) + +// LogCollectorContext holds context to configure the syslog server +type LogCollectorContext struct 
{ + // TaskName is the name of the Task + TaskName string + + // AllocDir is the handle to do operations on the alloc dir of + // the task + AllocDir *allocdir.AllocDir + + // LogConfig provides configuration related to log rotation + LogConfig *structs.LogConfig + + // PortUpperBound is the upper bound of the ports that we can use to start + // the syslog server + PortUpperBound uint + + // PortLowerBound is the lower bound of the ports that we can use to start + // the syslog server + PortLowerBound uint +} + +// SyslogCollectorState holds the address and islation information of a launched +// syslog server +type SyslogCollectorState struct { + IsolationConfig *cstructs.IsolationConfig + Addr string +} + +// LogCollector is an interface which allows a driver to launch a log server +// and update log configuration +type LogCollector interface { + LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error) + Exit() error + UpdateLogConfig(logConfig *structs.LogConfig) error +} + +// SyslogCollector is a LogCollector which starts a syslog server and does +// rotation to incoming stream +type SyslogCollector struct { +} + +// NewSyslogCollector returns an implementation of the SyslogCollector +func NewSyslogCollector(logger *log.Logger) *SyslogCollector { + return &SyslogCollector{} +} + +// LaunchCollector launches a new syslog server and starts writing log lines to +// files and rotates them +func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error) { + return nil, nil +} + +// Exit kills the syslog server +func (s *SyslogCollector) Exit() error { + return nil +} + +// UpdateLogConfig updates the log configuration +func (s *SyslogCollector) UpdateLogConfig(logConfig *structs.LogConfig) error { + return nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/rotator.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/rotator.go new file mode 100644 index 000000000..5cf200ee2 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/rotator.go @@ -0,0 +1,285 @@ +package logging + +import ( + "bufio" + "fmt" + "io/ioutil" + "log" + "os" + "path/filepath" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + bufSize = 32768 + flushDur = 100 * time.Millisecond +) + +// FileRotator writes bytes to a rotated set of files +type FileRotator struct { + MaxFiles int // MaxFiles is the maximum number of rotated files allowed in a path + FileSize int64 // FileSize is the size a rotated file is allowed to grow + + path string // path is the path on the file system where the rotated set of files are opened + baseFileName string // baseFileName is the base file name of the rotated files + logFileIdx int // logFileIdx is the current index of the rotated files + oldestLogFileIdx int // oldestLogFileIdx is the index of the oldest log file in a path + + currentFile *os.File // currentFile is the file that is currently getting written + currentWr int64 // currentWr is the number of bytes written to the current file + bufw *bufio.Writer + bufLock sync.Mutex + + flushTicker *time.Ticker + logger *log.Logger + purgeCh chan struct{} + doneCh chan struct{} + + closed bool + closedLock sync.Mutex +} + +// NewFileRotator returns a new file rotator +func NewFileRotator(path string, baseFile string, maxFiles int, + fileSize int64, logger *log.Logger) (*FileRotator, error) { + rotator := &FileRotator{ + MaxFiles: maxFiles, + FileSize: fileSize, + + path: path, + baseFileName: baseFile, + + flushTicker: 
time.NewTicker(flushDur), + logger: logger, + purgeCh: make(chan struct{}, 1), + doneCh: make(chan struct{}, 1), + } + if err := rotator.lastFile(); err != nil { + return nil, err + } + go rotator.purgeOldFiles() + go rotator.flushPeriodically() + return rotator, nil +} + +// Write writes a byte array to a file and rotates the file if it's size becomes +// equal to the maximum size the user has defined. +func (f *FileRotator) Write(p []byte) (n int, err error) { + n = 0 + var nw int + + for n < len(p) { + // Check if we still have space in the current file, otherwise close and + // open the next file + if f.currentWr >= f.FileSize { + f.flushBuffer() + f.currentFile.Close() + if err := f.nextFile(); err != nil { + f.logger.Printf("[ERROR] driver.rotator: error creating next file: %v", err) + return 0, err + } + } + // Calculate the remaining size on this file + remainingSize := f.FileSize - f.currentWr + + // Check if the number of bytes that we have to write is less than the + // remaining size of the file + if remainingSize < int64(len(p[n:])) { + // Write the number of bytes that we can write on the current file + li := int64(n) + remainingSize + nw, err = f.writeToBuffer(p[n:li]) + } else { + // Write all the bytes in the current file + nw, err = f.writeToBuffer(p[n:]) + } + + // Increment the number of bytes written so far in this method + // invocation + n += nw + + // Increment the total number of bytes in the file + f.currentWr += int64(n) + if err != nil { + f.logger.Printf("[ERROR] driver.rotator: error writing to file: %v", err) + return + } + } + return +} + +// nextFile opens the next file and purges older files if the number of rotated +// files is larger than the maximum files configured by the user +func (f *FileRotator) nextFile() error { + nextFileIdx := f.logFileIdx + for { + nextFileIdx += 1 + logFileName := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, nextFileIdx)) + if fi, err := os.Stat(logFileName); err == nil { + if fi.IsDir() || fi.Size() >= f.FileSize { + continue + } + } + f.logFileIdx = nextFileIdx + if err := f.createFile(); err != nil { + return err + } + break + } + // Purge old files if we have more files than MaxFiles + f.closedLock.Lock() + defer f.closedLock.Unlock() + if f.logFileIdx-f.oldestLogFileIdx >= f.MaxFiles && !f.closed { + select { + case f.purgeCh <- struct{}{}: + default: + } + } + return nil +} + +// lastFile finds out the rotated file with the largest index in a path. 
+func (f *FileRotator) lastFile() error { + finfos, err := ioutil.ReadDir(f.path) + if err != nil { + return err + } + + prefix := fmt.Sprintf("%s.", f.baseFileName) + for _, fi := range finfos { + if fi.IsDir() { + continue + } + if strings.HasPrefix(fi.Name(), prefix) { + fileIdx := strings.TrimPrefix(fi.Name(), prefix) + n, err := strconv.Atoi(fileIdx) + if err != nil { + continue + } + if n > f.logFileIdx { + f.logFileIdx = n + } + } + } + if err := f.createFile(); err != nil { + return err + } + return nil +} + +// createFile opens a new or existing file for writing +func (f *FileRotator) createFile() error { + logFileName := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, f.logFileIdx)) + cFile, err := os.OpenFile(logFileName, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + if err != nil { + return err + } + f.currentFile = cFile + fi, err := f.currentFile.Stat() + if err != nil { + return err + } + f.currentWr = fi.Size() + f.createOrResetBuffer() + return nil +} + +// flushPeriodically flushes the buffered writer every 100ms to the underlying +// file +func (f *FileRotator) flushPeriodically() { + for _ = range f.flushTicker.C { + f.flushBuffer() + } +} + +func (f *FileRotator) Close() { + f.closedLock.Lock() + defer f.closedLock.Unlock() + + // Stop the ticker and flush for one last time + f.flushTicker.Stop() + f.flushBuffer() + + // Stop the purge go routine + if !f.closed { + f.doneCh <- struct{}{} + close(f.purgeCh) + f.closed = true + } +} + +// purgeOldFiles removes older files and keeps only the last N files rotated for +// a file +func (f *FileRotator) purgeOldFiles() { + for { + select { + case <-f.purgeCh: + var fIndexes []int + files, err := ioutil.ReadDir(f.path) + if err != nil { + return + } + // Inserting all the rotated files in a slice + for _, fi := range files { + if strings.HasPrefix(fi.Name(), f.baseFileName) { + fileIdx := strings.TrimPrefix(fi.Name(), fmt.Sprintf("%s.", f.baseFileName)) + n, err := strconv.Atoi(fileIdx) + if err != nil { + continue + } + fIndexes = append(fIndexes, n) + } + } + + // Not continuing to delete files if the number of files is not more + // than MaxFiles + if len(fIndexes) <= f.MaxFiles { + continue + } + + // Sorting the file indexes so that we can purge the older files and keep + // only the number of files as configured by the user + sort.Sort(sort.IntSlice(fIndexes)) + toDelete := fIndexes[0 : len(fIndexes)-f.MaxFiles] + for _, fIndex := range toDelete { + fname := filepath.Join(f.path, fmt.Sprintf("%s.%d", f.baseFileName, fIndex)) + os.RemoveAll(fname) + } + f.oldestLogFileIdx = fIndexes[0] + case <-f.doneCh: + return + } + } +} + +// flushBuffer flushes the buffer +func (f *FileRotator) flushBuffer() error { + f.bufLock.Lock() + defer f.bufLock.Unlock() + if f.bufw != nil { + return f.bufw.Flush() + } + return nil +} + +// writeToBuffer writes the byte array to buffer +func (f *FileRotator) writeToBuffer(p []byte) (int, error) { + f.bufLock.Lock() + defer f.bufLock.Unlock() + return f.bufw.Write(p) +} + +// createOrResetBuffer creates a new buffer if we don't have one otherwise +// resets the buffer +func (f *FileRotator) createOrResetBuffer() { + f.bufLock.Lock() + defer f.bufLock.Unlock() + if f.bufw == nil { + f.bufw = bufio.NewWriterSize(f.currentFile, bufSize) + } else { + f.bufw.Reset(f.currentFile) + } +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_parser_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_parser_unix.go new file mode 100644 index 
000000000..4e0fec555 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_parser_unix.go @@ -0,0 +1,158 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package logging + +import ( + "fmt" + "log" + "log/syslog" + "strconv" +) + +// Errors related to parsing priority +var ( + ErrPriorityNoStart = fmt.Errorf("No start char found for priority") + ErrPriorityEmpty = fmt.Errorf("Priority field empty") + ErrPriorityNoEnd = fmt.Errorf("No end char found for priority") + ErrPriorityTooShort = fmt.Errorf("Priority field too short") + ErrPriorityTooLong = fmt.Errorf("Priority field too long") + ErrPriorityNonDigit = fmt.Errorf("Non digit found in priority") +) + +// Priority header and ending characters +const ( + PRI_PART_START = '<' + PRI_PART_END = '>' +) + +// SyslogMessage represents a log line received +type SyslogMessage struct { + Message []byte + Severity syslog.Priority +} + +// Priority holds all the priority bits in a syslog log line +type Priority struct { + Pri int + Facility syslog.Priority + Severity syslog.Priority +} + +// DockerLogParser parses a line of log message that the docker daemon ships +type DockerLogParser struct { + logger *log.Logger +} + +// NewDockerLogParser creates a new DockerLogParser +func NewDockerLogParser(logger *log.Logger) *DockerLogParser { + return &DockerLogParser{logger: logger} +} + +// Parse parses a syslog log line +func (d *DockerLogParser) Parse(line []byte) *SyslogMessage { + pri, _, _ := d.parsePriority(line) + msgIdx := d.logContentIndex(line) + + // Create a copy of the line so that subsequent Scans do not override the + // message + lineCopy := make([]byte, len(line[msgIdx:])) + copy(lineCopy, line[msgIdx:]) + + return &SyslogMessage{ + Severity: pri.Severity, + Message: lineCopy, + } +} + +// logContentIndex finds out the index of the start index of the content in a +// syslog line +func (d *DockerLogParser) logContentIndex(line []byte) int { + cursor := 0 + numSpace := 0 + numColons := 0 + // first look for at least 2 colons. 
This matches into the date that has no more spaces in it + // DefaultFormatter log line look: '<30>2016-07-06T15:13:11Z00:00 hostname docker/9648c64f5037[16200]' + // UnixFormatter log line look: '<30>Jul 6 15:13:11 docker/9648c64f5037[16200]' + for i := 0; i < len(line); i++ { + if line[i] == ':' { + numColons += 1 + if numColons == 2 { + cursor = i + break + } + } + } + // then look for the next space + for i := cursor; i < len(line); i++ { + if line[i] == ' ' { + numSpace += 1 + if numSpace == 1 { + cursor = i + break + } + } + } + // then the colon is what seperates it, followed by a space + for i := cursor; i < len(line); i++ { + if line[i] == ':' && i+1 < len(line) && line[i+1] == ' ' { + cursor = i + 1 + break + } + } + // return the cursor to the next character + return cursor + 1 +} + +// parsePriority parses the priority in a syslog message +func (d *DockerLogParser) parsePriority(line []byte) (Priority, int, error) { + cursor := 0 + pri := d.newPriority(0) + if len(line) <= 0 { + return pri, cursor, ErrPriorityEmpty + } + if line[cursor] != PRI_PART_START { + return pri, cursor, ErrPriorityNoStart + } + i := 1 + priDigit := 0 + for i < len(line) { + if i >= 5 { + return pri, cursor, ErrPriorityTooLong + } + c := line[i] + if c == PRI_PART_END { + if i == 1 { + return pri, cursor, ErrPriorityTooShort + } + cursor = i + 1 + return d.newPriority(priDigit), cursor, nil + } + if d.isDigit(c) { + v, e := strconv.Atoi(string(c)) + if e != nil { + return pri, cursor, e + } + priDigit = (priDigit * 10) + v + } else { + return pri, cursor, ErrPriorityNonDigit + } + i++ + } + return pri, cursor, ErrPriorityNoEnd +} + +// isDigit checks if a byte is a numeric char +func (d *DockerLogParser) isDigit(c byte) bool { + return c >= '0' && c <= '9' +} + +// newPriority creates a new default priority +func (d *DockerLogParser) newPriority(p int) Priority { + // The Priority value is calculated by first multiplying the Facility + // number by 8 and then adding the numerical value of the Severity. 
+ return Priority{
+ Pri: p,
+ Facility: syslog.Priority(p / 8),
+ Severity: syslog.Priority(p % 8),
+ }
+}
diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_unix.go
new file mode 100644
index 000000000..4fb450a8f
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_unix.go
@@ -0,0 +1,86 @@
+// +build !windows
+
+package logging
+
+import (
+ "bufio"
+ "log"
+ "net"
+ "sync"
+)
+
+// SyslogServer is a server which listens to syslog messages and parses them
+type SyslogServer struct {
+ listener net.Listener
+ messages chan *SyslogMessage
+ parser *DockerLogParser
+
+ doneCh chan interface{}
+ done bool
+ doneLock sync.Mutex
+
+ logger *log.Logger
+}
+
+// NewSyslogServer creates a new syslog server
+func NewSyslogServer(l net.Listener, messages chan *SyslogMessage, logger *log.Logger) *SyslogServer {
+ parser := NewDockerLogParser(logger)
+ return &SyslogServer{
+ listener: l,
+ messages: messages,
+ parser: parser,
+ logger: logger,
+ doneCh: make(chan interface{}),
+ }
+}
+
+// Start starts accepting syslog connections
+func (s *SyslogServer) Start() {
+ for {
+ select {
+ case <-s.doneCh:
+ s.listener.Close()
+ return
+ default:
+ connection, err := s.listener.Accept()
+ if err != nil {
+ s.logger.Printf("[ERR] logcollector.server: error in accepting connection: %v", err)
+ continue
+ }
+ go s.read(connection)
+ }
+ }
+}
+
+// read reads the bytes from a connection
+func (s *SyslogServer) read(connection net.Conn) {
+ defer connection.Close()
+ scanner := bufio.NewScanner(bufio.NewReader(connection))
+
+ for {
+ select {
+ case <-s.doneCh:
+ return
+ default:
+ }
+ if scanner.Scan() {
+ b := scanner.Bytes()
+ msg := s.parser.Parse(b)
+ s.messages <- msg
+ } else {
+ return
+ }
+ }
+}
+
+// Shutdown shuts down the syslog server
+func (s *SyslogServer) Shutdown() {
+ s.doneLock.Lock()
+ defer s.doneLock.Unlock()
+
+ if !s.done {
+ close(s.doneCh)
+ close(s.messages)
+ s.done = true
+ }
+}
diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_windows.go
new file mode 100644
index 000000000..cc6a60840
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/syslog_server_windows.go
@@ -0,0 +1,10 @@
+package logging
+
+type SyslogServer struct {
+}
+
+func (s *SyslogServer) Shutdown() {
+}
+
+type SyslogMessage struct {
+}
diff --git a/vendor/github.com/hashicorp/nomad/client/driver/logging/universal_collector_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/logging/universal_collector_unix.go
new file mode 100644
index 000000000..1db92fce4
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/client/driver/logging/universal_collector_unix.go
@@ -0,0 +1,207 @@
+// +build darwin dragonfly freebsd linux netbsd openbsd solaris
+
+package logging
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "log"
+ "log/syslog"
+ "net"
+ "os"
+ "runtime"
+
+ "github.com/hashicorp/nomad/client/allocdir"
+ cstructs "github.com/hashicorp/nomad/client/driver/structs"
+ "github.com/hashicorp/nomad/nomad/structs"
+)
+
+// LogCollectorContext holds context to configure the syslog server
+type LogCollectorContext struct {
+ // TaskName is the name of the Task
+ TaskName string
+
+ // AllocDir is the handle to do operations on the alloc dir of
+ // the task
+ AllocDir *allocdir.AllocDir
+
+ // LogConfig provides configuration related to log 
rotation
+ LogConfig *structs.LogConfig
+
+ // PortUpperBound is the upper bound of the ports that we can use to start
+ // the syslog server
+ PortUpperBound uint
+
+ // PortLowerBound is the lower bound of the ports that we can use to start
+ // the syslog server
+ PortLowerBound uint
+}
+
+// SyslogCollectorState holds the address and isolation information of a launched
+// syslog server
+type SyslogCollectorState struct {
+ IsolationConfig *cstructs.IsolationConfig
+ Addr string
+}
+
+// LogCollector is an interface which allows a driver to launch a log server
+// and update log configuration
+type LogCollector interface {
+ LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error)
+ Exit() error
+ UpdateLogConfig(logConfig *structs.LogConfig) error
+}
+
+// SyslogCollector is a LogCollector which starts a syslog server and rotates
+// the incoming stream
+type SyslogCollector struct {
+ addr net.Addr
+ logConfig *structs.LogConfig
+ ctx *LogCollectorContext
+
+ lro *FileRotator
+ lre *FileRotator
+ server *SyslogServer
+ syslogChan chan *SyslogMessage
+ taskDir string
+
+ logger *log.Logger
+}
+
+// NewSyslogCollector returns an implementation of the SyslogCollector
+func NewSyslogCollector(logger *log.Logger) *SyslogCollector {
+ return &SyslogCollector{logger: logger, syslogChan: make(chan *SyslogMessage, 2048)}
+}
+
+// LaunchCollector launches a new syslog server and starts writing log lines to
+// files and rotates them
+func (s *SyslogCollector) LaunchCollector(ctx *LogCollectorContext) (*SyslogCollectorState, error) {
+ l, err := s.getListener(ctx.PortLowerBound, ctx.PortUpperBound)
+ if err != nil {
+ return nil, err
+ }
+ s.logger.Printf("[DEBUG] syslog-server: launching syslog server on addr: %v", l.Addr().String())
+ s.ctx = ctx
+ // configuring the task dir
+ if err := s.configureTaskDir(); err != nil {
+ return nil, err
+ }
+
+ s.server = NewSyslogServer(l, s.syslogChan, s.logger)
+ go s.server.Start()
+ logFileSize := int64(ctx.LogConfig.MaxFileSizeMB * 1024 * 1024)
+
+ lro, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stdout", ctx.TaskName),
+ ctx.LogConfig.MaxFiles, logFileSize, s.logger)
+
+ if err != nil {
+ return nil, err
+ }
+ s.lro = lro
+
+ lre, err := NewFileRotator(ctx.AllocDir.LogDir(), fmt.Sprintf("%v.stderr", ctx.TaskName),
+ ctx.LogConfig.MaxFiles, logFileSize, s.logger)
+ if err != nil {
+ return nil, err
+ }
+ s.lre = lre
+
+ go s.collectLogs(lre, lro)
+ syslogAddr := fmt.Sprintf("%s://%s", l.Addr().Network(), l.Addr().String())
+ return &SyslogCollectorState{Addr: syslogAddr}, nil
+}
+
+func (s *SyslogCollector) collectLogs(we io.Writer, wo io.Writer) {
+ for logParts := range s.syslogChan {
+ // If the severity of the log line is err then we write to stderr
+ // otherwise all messages go to stdout
+ if logParts.Severity == syslog.LOG_ERR {
+ s.lre.Write(logParts.Message)
+ s.lre.Write([]byte{'\n'})
+ } else {
+ s.lro.Write(logParts.Message)
+ s.lro.Write([]byte{'\n'})
+ }
+ }
+}
+
+// Exit kills the syslog server
+func (s *SyslogCollector) Exit() error {
+ s.server.Shutdown()
+ s.lre.Close()
+ s.lro.Close()
+ return nil
+}
+
+// UpdateLogConfig updates the log configuration
+func (s *SyslogCollector) UpdateLogConfig(logConfig *structs.LogConfig) error {
+ s.ctx.LogConfig = logConfig
+ if s.lro == nil {
+ return fmt.Errorf("log rotator for stdout doesn't exist")
+ }
+ s.lro.MaxFiles = logConfig.MaxFiles
+ s.lro.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
+
+ if s.lre == nil {
+ return fmt.Errorf("log rotator 
for stderr doesn't exist")
+ }
+ s.lre.MaxFiles = logConfig.MaxFiles
+ s.lre.FileSize = int64(logConfig.MaxFileSizeMB * 1024 * 1024)
+ return nil
+}
+
+// configureTaskDir sets the task dir in the SyslogCollector
+func (s *SyslogCollector) configureTaskDir() error {
+ taskDir, ok := s.ctx.AllocDir.TaskDirs[s.ctx.TaskName]
+ if !ok {
+ return fmt.Errorf("couldn't find task directory for task %v", s.ctx.TaskName)
+ }
+ s.taskDir = taskDir
+ return nil
+}
+
+// getListener returns a listener bound to a free port between the lower and
+// upper bounds
+func (s *SyslogCollector) getListener(lowerBound uint, upperBound uint) (net.Listener, error) {
+ if runtime.GOOS == "windows" {
+ return s.listenerTCP(lowerBound, upperBound)
+ }
+
+ return s.listenerUnix()
+}
+
+// listenerTCP creates a TCP listener using an unused port between an upper and
+// lower bound
+func (s *SyslogCollector) listenerTCP(lowerBound uint, upperBound uint) (net.Listener, error) {
+ for i := lowerBound; i <= upperBound; i++ {
+ addr, err := net.ResolveTCPAddr("tcp", fmt.Sprintf("localhost:%v", i))
+ if err != nil {
+ return nil, err
+ }
+ l, err := net.ListenTCP("tcp", addr)
+ if err != nil {
+ continue
+ }
+ return l, nil
+ }
+ return nil, fmt.Errorf("No free port found")
+}
+
+// listenerUnix creates a Unix domain socket
+func (s *SyslogCollector) listenerUnix() (net.Listener, error) {
+ f, err := ioutil.TempFile("", "plugin")
+ if err != nil {
+ return nil, err
+ }
+ path := f.Name()
+
+ if err := f.Close(); err != nil {
+ return nil, err
+ }
+ if err := os.Remove(path); err != nil {
+ return nil, err
+ }
+
+ return net.Listen("unix", path)
+}
diff --git a/vendor/github.com/hashicorp/nomad/client/driver/plugins.go b/vendor/github.com/hashicorp/nomad/client/driver/plugins.go
new file mode 100644
index 000000000..4808d81e1
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/client/driver/plugins.go
@@ -0,0 +1,51 @@
+package driver
+
+import (
+ "io"
+ "log"
+ "net"
+
+ "github.com/hashicorp/go-plugin"
+)
+
+var HandshakeConfig = plugin.HandshakeConfig{
+ ProtocolVersion: 1,
+ MagicCookieKey: "NOMAD_PLUGIN_MAGIC_COOKIE",
+ MagicCookieValue: "e4327c2e01eabfd75a8a67adb114fb34a757d57eee7728d857a8cec6e91a7255",
+}
+
+func GetPluginMap(w io.Writer) map[string]plugin.Plugin {
+ e := new(ExecutorPlugin)
+ e.logger = log.New(w, "", log.LstdFlags)
+
+ s := new(SyslogCollectorPlugin)
+ s.logger = log.New(w, "", log.LstdFlags)
+ return map[string]plugin.Plugin{
+ "executor": e,
+ "syslogcollector": s,
+ }
+}
+
+// PluginReattachConfig is the config that we serialize and de-serialize and
+// store on disk
+type PluginReattachConfig struct {
+ Pid int
+ AddrNet string
+ AddrName string
+}
+
+// PluginConfig returns a plugin.ReattachConfig from a PluginReattachConfig
+func (c *PluginReattachConfig) PluginConfig() *plugin.ReattachConfig {
+ var addr net.Addr
+ switch c.AddrNet {
+ case "unix", "unixgram", "unixpacket":
+ addr, _ = net.ResolveUnixAddr(c.AddrNet, c.AddrName)
+ case "tcp", "tcp4", "tcp6":
+ addr, _ = net.ResolveTCPAddr(c.AddrNet, c.AddrName)
+ }
+ return &plugin.ReattachConfig{Pid: c.Pid, Addr: addr}
+}
+
+func NewPluginReattachConfig(c *plugin.ReattachConfig) *PluginReattachConfig {
+ return &PluginReattachConfig{Pid: c.Pid, AddrNet: c.Addr.Network(), AddrName: c.Addr.String()}
+}
diff --git a/vendor/github.com/hashicorp/nomad/client/driver/qemu.go b/vendor/github.com/hashicorp/nomad/client/driver/qemu.go
new file mode 100644
index 000000000..d109f3912
--- /dev/null
+++ b/vendor/github.com/hashicorp/nomad/client/driver/qemu.go
@@ 
-0,0 +1,412 @@ +package driver + +import ( + "encoding/json" + "fmt" + "log" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + "github.com/hashicorp/nomad/client/fingerprint" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +var ( + reQemuVersion = regexp.MustCompile(`version (\d[\.\d+]+)`) +) + +const ( + // The key populated in Node Attributes to indicate presence of the Qemu + // driver + qemuDriverAttr = "driver.qemu" +) + +// QemuDriver is a driver for running images via Qemu +// We attempt to chose sane defaults for now, with more configuration available +// planned in the future +type QemuDriver struct { + DriverContext + fingerprint.StaticFingerprinter +} + +type QemuDriverConfig struct { + ImagePath string `mapstructure:"image_path"` + Accelerator string `mapstructure:"accelerator"` + PortMap []map[string]int `mapstructure:"port_map"` // A map of host port labels and to guest ports. + Args []string `mapstructure:"args"` // extra arguments to qemu executable +} + +// qemuHandle is returned from Start/Open as a handle to the PID +type qemuHandle struct { + pluginClient *plugin.Client + userPid int + executor executor.Executor + allocDir *allocdir.AllocDir + killTimeout time.Duration + maxKillTimeout time.Duration + logger *log.Logger + version string + waitCh chan *dstructs.WaitResult + doneCh chan struct{} +} + +// NewQemuDriver is used to create a new exec driver +func NewQemuDriver(ctx *DriverContext) Driver { + return &QemuDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *QemuDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "image_path": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "accelerator": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "port_map": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +func (d *QemuDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[qemuDriverAttr] + + bin := "qemu-system-x86_64" + if runtime.GOOS == "windows" { + // On windows, the "qemu-system-x86_64" command does not respond to the + // version flag. 
+ bin = "qemu-img" + } + outBytes, err := exec.Command(bin, "--version").Output() + if err != nil { + delete(node.Attributes, qemuDriverAttr) + return false, nil + } + out := strings.TrimSpace(string(outBytes)) + + matches := reQemuVersion.FindStringSubmatch(out) + if len(matches) != 2 { + delete(node.Attributes, qemuDriverAttr) + return false, fmt.Errorf("Unable to parse Qemu version string: %#v", matches) + } + + if !currentlyEnabled { + d.logger.Printf("[DEBUG] driver.qemu: enabling driver") + } + node.Attributes[qemuDriverAttr] = "1" + node.Attributes["driver.qemu.version"] = matches[1] + return true, nil +} + +// Run an existing Qemu image. Start() will pull down an existing, valid Qemu +// image and save it to the Drivers Allocation Dir +func (d *QemuDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + var driverConfig QemuDriverConfig + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + + if len(driverConfig.PortMap) > 1 { + return nil, fmt.Errorf("Only one port_map block is allowed in the qemu driver config") + } + + // Get the image source + vmPath := driverConfig.ImagePath + if vmPath == "" { + return nil, fmt.Errorf("image_path must be set") + } + vmID := filepath.Base(vmPath) + + // Get the tasks local directory. + taskDir, ok := ctx.AllocDir.TaskDirs[d.DriverContext.taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + // Parse configuration arguments + // Create the base arguments + accelerator := "tcg" + if driverConfig.Accelerator != "" { + accelerator = driverConfig.Accelerator + } + // TODO: Check a lower bounds, e.g. the default 128 of Qemu + mem := fmt.Sprintf("%dM", task.Resources.MemoryMB) + + absPath, err := GetAbsolutePath("qemu-system-x86_64") + if err != nil { + return nil, err + } + + args := []string{ + absPath, + "-machine", "type=pc,accel=" + accelerator, + "-name", vmID, + "-m", mem, + "-drive", "file=" + vmPath, + "-nographic", + } + + // Add pass through arguments to qemu executable. A user can specify + // these arguments in driver task configuration. These arguments are + // passed directly to the qemu driver as command line options. + // For example, args = [ "-nodefconfig", "-nodefaults" ] + // This will allow a VM with embedded configuration to boot successfully. + args = append(args, driverConfig.Args...) + + // Check the Resources required Networks to add port mappings. If no resources + // are required, we assume the VM is a purely compute job and does not require + // the outside world to be able to reach it. 
VMs ran without port mappings can + // still reach out to the world, but without port mappings it is effectively + // firewalled + protocols := []string{"udp", "tcp"} + if len(task.Resources.Networks) > 0 && len(driverConfig.PortMap) == 1 { + // Loop through the port map and construct the hostfwd string, to map + // reserved ports to the ports listenting in the VM + // Ex: hostfwd=tcp::22000-:22,hostfwd=tcp::80-:8080 + var forwarding []string + taskPorts := task.Resources.Networks[0].MapLabelToValues(nil) + for label, guest := range driverConfig.PortMap[0] { + host, ok := taskPorts[label] + if !ok { + return nil, fmt.Errorf("Unknown port label %q", label) + } + + for _, p := range protocols { + forwarding = append(forwarding, fmt.Sprintf("hostfwd=%s::%d-:%d", p, host, guest)) + } + } + + if len(forwarding) != 0 { + args = append(args, + "-netdev", + fmt.Sprintf("user,id=user.0,%s", strings.Join(forwarding, ",")), + "-device", "virtio-net,netdev=user.0", + ) + } + } + + // If using KVM, add optimization args + if accelerator == "kvm" { + args = append(args, + "-enable-kvm", + "-cpu", "host", + // Do we have cores information available to the Driver? + // "-smp", fmt.Sprintf("%d", cores), + ) + } + + d.logger.Printf("[DEBUG] Starting QemuVM command: %q", strings.Join(args, " ")) + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Driver: "qemu", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + Task: task, + } + ps, err := exec.LaunchCmd(&executor.ExecCommand{ + Cmd: args[0], + Args: args[1:], + User: task.User, + }, executorCtx) + if err != nil { + pluginClient.Kill() + return nil, err + } + d.logger.Printf("[INFO] Started new QemuVM: %s", vmID) + + // Create and Return Handle + maxKill := d.DriverContext.config.MaxKillTimeout + h := &qemuHandle{ + pluginClient: pluginClient, + executor: exec, + userPid: ps.Pid, + allocDir: ctx.AllocDir, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + version: d.config.Version, + logger: d.logger, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.qemu: error registering services for task: %q: %v", task.Name, err) + } + go h.run() + return h, nil +} + +type qemuId struct { + Version string + KillTimeout time.Duration + MaxKillTimeout time.Duration + UserPid int + PluginConfig *PluginReattachConfig + AllocDir *allocdir.AllocDir +} + +func (d *QemuDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + id := &qemuId{} + if err := json.Unmarshal([]byte(handleID), id); err != nil { + return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err) + } + + pluginConfig := &plugin.ClientConfig{ + Reattach: id.PluginConfig.PluginConfig(), + } + + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + d.logger.Println("[ERR] driver.qemu: error connecting to plugin so destroying plugin pid and user pid") + if e := destroyPlugin(id.PluginConfig.Pid, 
id.UserPid); e != nil { + d.logger.Printf("[ERR] driver.qemu: error destroying plugin and userpid: %v", e) + } + return nil, fmt.Errorf("error connecting to plugin: %v", err) + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.qemu: version of executor: %v", ver.Version) + // Return a driver handle + h := &qemuHandle{ + pluginClient: pluginClient, + executor: exec, + userPid: id.UserPid, + allocDir: id.AllocDir, + logger: d.logger, + killTimeout: id.KillTimeout, + maxKillTimeout: id.MaxKillTimeout, + version: id.Version, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.qemu: error registering services: %v", err) + } + go h.run() + return h, nil +} + +func (h *qemuHandle) ID() string { + id := qemuId{ + Version: h.version, + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + UserPid: h.userPid, + AllocDir: h.allocDir, + } + + data, err := json.Marshal(id) + if err != nil { + h.logger.Printf("[ERR] driver.qemu: failed to marshal ID to JSON: %s", err) + } + return string(data) +} + +func (h *qemuHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *qemuHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + h.executor.UpdateTask(task) + + // Update is not possible + return nil +} + +// TODO: allow a 'shutdown_command' that can be executed over a ssh connection +// to the VM +func (h *qemuHandle) Kill() error { + if err := h.executor.ShutDown(); err != nil { + if h.pluginClient.Exited() { + return nil + } + return fmt.Errorf("executor Shutdown failed: %v", err) + } + + select { + case <-h.doneCh: + return nil + case <-time.After(h.killTimeout): + if h.pluginClient.Exited() { + return nil + } + if err := h.executor.Exit(); err != nil { + return fmt.Errorf("executor Exit failed: %v", err) + } + + return nil + } +} + +func (h *qemuHandle) Stats() (*cstructs.TaskResourceUsage, error) { + return h.executor.Stats() +} + +func (h *qemuHandle) run() { + ps, err := h.executor.Wait() + if ps.ExitCode == 0 && err != nil { + if e := killProcess(h.userPid); e != nil { + h.logger.Printf("[ERR] driver.qemu: error killing user process: %v", e) + } + if e := h.allocDir.UnmountAll(); e != nil { + h.logger.Printf("[ERR] driver.qemu: unmounting dev,proc and alloc dirs failed: %v", e) + } + } + close(h.doneCh) + h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err} + close(h.waitCh) + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.qemu: failed to deregister services: %v", err) + } + + h.executor.Exit() + h.pluginClient.Kill() +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/raw_exec.go b/vendor/github.com/hashicorp/nomad/client/driver/raw_exec.go new file mode 100644 index 000000000..684632b06 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/raw_exec.go @@ -0,0 +1,307 @@ +package driver + +import ( + "encoding/json" + "fmt" + "log" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + 
"github.com/hashicorp/nomad/client/fingerprint" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +const ( + // The option that enables this driver in the Config.Options map. + rawExecConfigOption = "driver.raw_exec.enable" + + // The key populated in Node Attributes to indicate presence of the Raw Exec + // driver + rawExecDriverAttr = "driver.raw_exec" +) + +// The RawExecDriver is a privileged version of the exec driver. It provides no +// resource isolation and just fork/execs. The Exec driver should be preferred +// and this should only be used when explicitly needed. +type RawExecDriver struct { + DriverContext + fingerprint.StaticFingerprinter +} + +// rawExecHandle is returned from Start/Open as a handle to the PID +type rawExecHandle struct { + version string + pluginClient *plugin.Client + userPid int + executor executor.Executor + killTimeout time.Duration + maxKillTimeout time.Duration + allocDir *allocdir.AllocDir + logger *log.Logger + waitCh chan *dstructs.WaitResult + doneCh chan struct{} +} + +// NewRawExecDriver is used to create a new raw exec driver +func NewRawExecDriver(ctx *DriverContext) Driver { + return &RawExecDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *RawExecDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "command": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +func (d *RawExecDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[rawExecDriverAttr] + + // Check that the user has explicitly enabled this executor. + enabled := cfg.ReadBoolDefault(rawExecConfigOption, false) + + if enabled { + if currentlyEnabled { + d.logger.Printf("[WARN] driver.raw_exec: raw exec is enabled. Only enable if needed") + } + node.Attributes[rawExecDriverAttr] = "1" + return true, nil + } + + delete(node.Attributes, rawExecDriverAttr) + return false, nil +} + +func (d *RawExecDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + var driverConfig ExecDriverConfig + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + // Get the tasks local directory. + taskName := d.DriverContext.taskName + taskDir, ok := ctx.AllocDir.TaskDirs[taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + // Get the command to be ran + command := driverConfig.Command + if err := validateCommand(command, "args"); err != nil { + return nil, err + } + + // Set the host environment variables. 
+ filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",") + d.taskEnv.AppendHostEnvvars(filter) + + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Driver: "raw_exec", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + Task: task, + } + + ps, err := exec.LaunchCmd(&executor.ExecCommand{ + Cmd: command, + Args: driverConfig.Args, + User: task.User, + }, executorCtx) + if err != nil { + pluginClient.Kill() + return nil, err + } + d.logger.Printf("[DEBUG] driver.raw_exec: started process with pid: %v", ps.Pid) + + // Return a driver handle + maxKill := d.DriverContext.config.MaxKillTimeout + h := &rawExecHandle{ + pluginClient: pluginClient, + executor: exec, + userPid: ps.Pid, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + allocDir: ctx.AllocDir, + version: d.config.Version, + logger: d.logger, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.raw_exec: error registering services with consul for task: %q: %v", task.Name, err) + } + go h.run() + return h, nil +} + +type rawExecId struct { + Version string + KillTimeout time.Duration + MaxKillTimeout time.Duration + UserPid int + PluginConfig *PluginReattachConfig + AllocDir *allocdir.AllocDir +} + +func (d *RawExecDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + id := &rawExecId{} + if err := json.Unmarshal([]byte(handleID), id); err != nil { + return nil, fmt.Errorf("Failed to parse handle '%s': %v", handleID, err) + } + + pluginConfig := &plugin.ClientConfig{ + Reattach: id.PluginConfig.PluginConfig(), + } + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + d.logger.Println("[ERR] driver.raw_exec: error connecting to plugin so destroying plugin pid and user pid") + if e := destroyPlugin(id.PluginConfig.Pid, id.UserPid); e != nil { + d.logger.Printf("[ERR] driver.raw_exec: error destroying plugin and userpid: %v", e) + } + return nil, fmt.Errorf("error connecting to plugin: %v", err) + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.raw_exec: version of executor: %v", ver.Version) + + // Return a driver handle + h := &rawExecHandle{ + pluginClient: pluginClient, + executor: exec, + userPid: id.UserPid, + logger: d.logger, + killTimeout: id.KillTimeout, + maxKillTimeout: id.MaxKillTimeout, + allocDir: id.AllocDir, + version: id.Version, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.raw_exec: error registering services with consul: %v", err) + } + go h.run() + return h, nil +} + +func (h *rawExecHandle) ID() string { + id := rawExecId{ + Version: h.version, + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + UserPid: h.userPid, + AllocDir: 
h.allocDir, + } + + data, err := json.Marshal(id) + if err != nil { + h.logger.Printf("[ERR] driver.raw_exec: failed to marshal ID to JSON: %s", err) + } + return string(data) +} + +func (h *rawExecHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *rawExecHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + h.executor.UpdateTask(task) + + // Update is not possible + return nil +} + +func (h *rawExecHandle) Kill() error { + if err := h.executor.ShutDown(); err != nil { + if h.pluginClient.Exited() { + return nil + } + return fmt.Errorf("executor Shutdown failed: %v", err) + } + + select { + case <-h.doneCh: + return nil + case <-time.After(h.killTimeout): + if h.pluginClient.Exited() { + return nil + } + if err := h.executor.Exit(); err != nil { + return fmt.Errorf("executor Exit failed: %v", err) + } + + return nil + } +} + +func (h *rawExecHandle) Stats() (*cstructs.TaskResourceUsage, error) { + return h.executor.Stats() +} + +func (h *rawExecHandle) run() { + ps, err := h.executor.Wait() + close(h.doneCh) + if ps.ExitCode == 0 && err != nil { + if e := killProcess(h.userPid); e != nil { + h.logger.Printf("[ERR] driver.raw_exec: error killing user process: %v", e) + } + if e := h.allocDir.UnmountAll(); e != nil { + h.logger.Printf("[ERR] driver.raw_exec: unmounting dev,proc and alloc dirs failed: %v", e) + } + } + h.waitCh <- &dstructs.WaitResult{ExitCode: ps.ExitCode, Signal: ps.Signal, Err: err} + close(h.waitCh) + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.raw_exec: failed to deregister services: %v", err) + } + + if err := h.executor.Exit(); err != nil { + h.logger.Printf("[ERR] driver.raw_exec: error killing executor: %v", err) + } + h.pluginClient.Kill() +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/rkt.go b/vendor/github.com/hashicorp/nomad/client/driver/rkt.go new file mode 100644 index 000000000..875372394 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/rkt.go @@ -0,0 +1,436 @@ +package driver + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net" + "os/exec" + "path/filepath" + "regexp" + "runtime" + "strings" + "syscall" + "time" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/go-version" + "github.com/hashicorp/nomad/client/allocdir" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + dstructs "github.com/hashicorp/nomad/client/driver/structs" + "github.com/hashicorp/nomad/client/fingerprint" + cstructs "github.com/hashicorp/nomad/client/structs" + "github.com/hashicorp/nomad/helper/discover" + "github.com/hashicorp/nomad/helper/fields" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/mitchellh/mapstructure" +) + +var ( + reRktVersion = regexp.MustCompile(`rkt [vV]ersion[:]? (\d[.\d]+)`) + reAppcVersion = regexp.MustCompile(`appc [vV]ersion[:]? (\d[.\d]+)`) +) + +const ( + // minRktVersion is the earliest supported version of rkt. rkt added support + // for CPU and memory isolators in 0.14.0. 
We cannot support an earlier + // version to maintain a uniform interface across all drivers + minRktVersion = "0.14.0" + + // The key populated in the Node Attributes to indicate the presence of the + // Rkt driver + rktDriverAttr = "driver.rkt" +) + +// RktDriver is a driver for running images via Rkt +// We attempt to choose sane defaults for now, with more configuration available +// planned in the future +type RktDriver struct { + DriverContext + fingerprint.StaticFingerprinter +} + +type RktDriverConfig struct { + ImageName string `mapstructure:"image"` + Command string `mapstructure:"command"` + Args []string `mapstructure:"args"` + TrustPrefix string `mapstructure:"trust_prefix"` + DNSServers []string `mapstructure:"dns_servers"` // DNS Server for containers + DNSSearchDomains []string `mapstructure:"dns_search_domains"` // DNS Search domains for containers + Debug bool `mapstructure:"debug"` // Enable debug option for rkt command +} + +// rktHandle is returned from Start/Open as a handle to the PID +type rktHandle struct { + pluginClient *plugin.Client + executorPid int + executor executor.Executor + allocDir *allocdir.AllocDir + logger *log.Logger + killTimeout time.Duration + maxKillTimeout time.Duration + waitCh chan *dstructs.WaitResult + doneCh chan struct{} +} + +// rktPID is a struct to map the pid running the process to the vm image on +// disk +type rktPID struct { + PluginConfig *PluginReattachConfig + AllocDir *allocdir.AllocDir + ExecutorPid int + KillTimeout time.Duration + MaxKillTimeout time.Duration +} + +// NewRktDriver is used to create a new rkt driver +func NewRktDriver(ctx *DriverContext) Driver { + return &RktDriver{DriverContext: *ctx} +} + +// Validate is used to validate the driver configuration +func (d *RktDriver) Validate(config map[string]interface{}) error { + fd := &fields.FieldData{ + Raw: config, + Schema: map[string]*fields.FieldSchema{ + "image": &fields.FieldSchema{ + Type: fields.TypeString, + Required: true, + }, + "command": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "args": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "trust_prefix": &fields.FieldSchema{ + Type: fields.TypeString, + }, + "dns_servers": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "dns_search_domains": &fields.FieldSchema{ + Type: fields.TypeArray, + }, + "debug": &fields.FieldSchema{ + Type: fields.TypeBool, + }, + }, + } + + if err := fd.Validate(); err != nil { + return err + } + + return nil +} + +func (d *RktDriver) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // Get the current status so that we can log any debug messages only if the + // state changes + _, currentlyEnabled := node.Attributes[rktDriverAttr] + + // Only enable if we are root when running on non-windows systems.
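+ // (Editorial note: rkt itself needs root privileges to set up pods, so a non-root agent reports the driver as unavailable here rather than failing later at task start.)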
+ if runtime.GOOS != "windows" && syscall.Geteuid() != 0 { + if currentlyEnabled { + d.logger.Printf("[DEBUG] driver.rkt: must run as root user, disabling") + } + delete(node.Attributes, rktDriverAttr) + return false, nil + } + + outBytes, err := exec.Command("rkt", "version").Output() + if err != nil { + delete(node.Attributes, rktDriverAttr) + return false, nil + } + out := strings.TrimSpace(string(outBytes)) + + rktMatches := reRktVersion.FindStringSubmatch(out) + appcMatches := reAppcVersion.FindStringSubmatch(out) + if len(rktMatches) != 2 || len(appcMatches) != 2 { + delete(node.Attributes, rktDriverAttr) + return false, fmt.Errorf("Unable to parse Rkt version string: %#v", rktMatches) + } + + node.Attributes[rktDriverAttr] = "1" + node.Attributes["driver.rkt.version"] = rktMatches[1] + node.Attributes["driver.rkt.appc.version"] = appcMatches[1] + + minVersion, _ := version.NewVersion(minRktVersion) + currentVersion, _ := version.NewVersion(node.Attributes["driver.rkt.version"]) + if currentVersion.LessThan(minVersion) { + // Do not allow rkt < 0.14.0 + d.logger.Printf("[WARN] driver.rkt: please upgrade rkt to a version >= %s", minVersion) + node.Attributes[rktDriverAttr] = "0" + } + return true, nil +} + +// Run an existing Rkt image. +func (d *RktDriver) Start(ctx *ExecContext, task *structs.Task) (DriverHandle, error) { + var driverConfig RktDriverConfig + if err := mapstructure.WeakDecode(task.Config, &driverConfig); err != nil { + return nil, err + } + + // ACI image + img := driverConfig.ImageName + + // Get the tasks local directory. + taskName := d.DriverContext.taskName + taskDir, ok := ctx.AllocDir.TaskDirs[taskName] + if !ok { + return nil, fmt.Errorf("Could not find task directory for task: %v", d.DriverContext.taskName) + } + + // Build the command. + var cmdArgs []string + + // Add debug option to rkt command. + debug := driverConfig.Debug + + // Add the given trust prefix + trustPrefix := driverConfig.TrustPrefix + insecure := false + if trustPrefix != "" { + var outBuf, errBuf bytes.Buffer + cmd := exec.Command("rkt", "trust", "--skip-fingerprint-review=true", fmt.Sprintf("--prefix=%s", trustPrefix), fmt.Sprintf("--debug=%t", debug)) + cmd.Stdout = &outBuf + cmd.Stderr = &errBuf + if err := cmd.Run(); err != nil { + return nil, fmt.Errorf("Error running rkt trust: %s\n\nOutput: %s\n\nError: %s", + err, outBuf.String(), errBuf.String()) + } + d.logger.Printf("[DEBUG] driver.rkt: added trust prefix: %q", trustPrefix) + } else { + // Disble signature verification if the trust command was not run. + insecure = true + } + cmdArgs = append(cmdArgs, "run") + cmdArgs = append(cmdArgs, fmt.Sprintf("--volume=%s,kind=host,source=%s", task.Name, ctx.AllocDir.SharedDir)) + cmdArgs = append(cmdArgs, fmt.Sprintf("--mount=volume=%s,target=%s", task.Name, ctx.AllocDir.SharedDir)) + cmdArgs = append(cmdArgs, img) + if insecure == true { + cmdArgs = append(cmdArgs, "--insecure-options=all") + } + cmdArgs = append(cmdArgs, fmt.Sprintf("--debug=%t", debug)) + + // Inject environment variables + for k, v := range d.taskEnv.EnvMap() { + cmdArgs = append(cmdArgs, fmt.Sprintf("--set-env=%v=%v", k, v)) + } + + // Check if the user has overridden the exec command. 
+ if driverConfig.Command != "" { + cmdArgs = append(cmdArgs, fmt.Sprintf("--exec=%v", driverConfig.Command)) + } + + // Add memory isolator + cmdArgs = append(cmdArgs, fmt.Sprintf("--memory=%vM", int64(task.Resources.MemoryMB))) + + // Add CPU isolator + cmdArgs = append(cmdArgs, fmt.Sprintf("--cpu=%vm", int64(task.Resources.CPU))) + + // Add DNS servers + for _, ip := range driverConfig.DNSServers { + if err := net.ParseIP(ip); err == nil { + msg := fmt.Errorf("invalid ip address for container dns server %q", ip) + d.logger.Printf("[DEBUG] driver.rkt: %v", msg) + return nil, msg + } else { + cmdArgs = append(cmdArgs, fmt.Sprintf("--dns=%s", ip)) + } + } + + // set DNS search domains + for _, domain := range driverConfig.DNSSearchDomains { + cmdArgs = append(cmdArgs, fmt.Sprintf("--dns-search=%s", domain)) + } + + // Add user passed arguments. + if len(driverConfig.Args) != 0 { + parsed := d.taskEnv.ParseAndReplace(driverConfig.Args) + + // Need to start arguments with "--" + if len(parsed) > 0 { + cmdArgs = append(cmdArgs, "--") + } + + for _, arg := range parsed { + cmdArgs = append(cmdArgs, fmt.Sprintf("%v", arg)) + } + } + + // Set the host environment variables. + filter := strings.Split(d.config.ReadDefault("env.blacklist", config.DefaultEnvBlacklist), ",") + d.taskEnv.AppendHostEnvvars(filter) + + bin, err := discover.NomadExecutable() + if err != nil { + return nil, fmt.Errorf("unable to find the nomad binary: %v", err) + } + + pluginLogFile := filepath.Join(taskDir, fmt.Sprintf("%s-executor.out", task.Name)) + pluginConfig := &plugin.ClientConfig{ + Cmd: exec.Command(bin, "executor", pluginLogFile), + } + + execIntf, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + return nil, err + } + executorCtx := &executor.ExecutorContext{ + TaskEnv: d.taskEnv, + Driver: "rkt", + AllocDir: ctx.AllocDir, + AllocID: ctx.AllocID, + Task: task, + } + + absPath, err := GetAbsolutePath("rkt") + if err != nil { + return nil, err + } + + ps, err := execIntf.LaunchCmd(&executor.ExecCommand{ + Cmd: absPath, + Args: cmdArgs, + User: task.User, + }, executorCtx) + if err != nil { + pluginClient.Kill() + return nil, err + } + + d.logger.Printf("[DEBUG] driver.rkt: started ACI %q with: %v", img, cmdArgs) + maxKill := d.DriverContext.config.MaxKillTimeout + h := &rktHandle{ + pluginClient: pluginClient, + executor: execIntf, + executorPid: ps.Pid, + allocDir: ctx.AllocDir, + logger: d.logger, + killTimeout: GetKillTimeout(task.KillTimeout, maxKill), + maxKillTimeout: maxKill, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.rkt: error registering services for task: %q: %v", task.Name, err) + } + go h.run() + return h, nil +} + +func (d *RktDriver) Open(ctx *ExecContext, handleID string) (DriverHandle, error) { + // Parse the handle + pidBytes := []byte(strings.TrimPrefix(handleID, "Rkt:")) + id := &rktPID{} + if err := json.Unmarshal(pidBytes, id); err != nil { + return nil, fmt.Errorf("failed to parse Rkt handle '%s': %v", handleID, err) + } + + pluginConfig := &plugin.ClientConfig{ + Reattach: id.PluginConfig.PluginConfig(), + } + exec, pluginClient, err := createExecutor(pluginConfig, d.config.LogOutput, d.config) + if err != nil { + d.logger.Println("[ERROR] driver.rkt: error connecting to plugin so destroying plugin pid and user pid") + if e := destroyPlugin(id.PluginConfig.Pid, id.ExecutorPid); e != nil { + 
d.logger.Printf("[ERROR] driver.rkt: error destroying plugin and executor pid: %v", e) + } + return nil, fmt.Errorf("error connecting to plugin: %v", err) + } + + ver, _ := exec.Version() + d.logger.Printf("[DEBUG] driver.rkt: version of executor: %v", ver.Version) + // Return a driver handle + h := &rktHandle{ + pluginClient: pluginClient, + executorPid: id.ExecutorPid, + allocDir: id.AllocDir, + executor: exec, + logger: d.logger, + killTimeout: id.KillTimeout, + maxKillTimeout: id.MaxKillTimeout, + doneCh: make(chan struct{}), + waitCh: make(chan *dstructs.WaitResult, 1), + } + if err := h.executor.SyncServices(consulContext(d.config, "")); err != nil { + h.logger.Printf("[ERR] driver.rkt: error registering services: %v", err) + } + go h.run() + return h, nil +} + +func (h *rktHandle) ID() string { + // Return a handle to the PID + pid := &rktPID{ + PluginConfig: NewPluginReattachConfig(h.pluginClient.ReattachConfig()), + KillTimeout: h.killTimeout, + MaxKillTimeout: h.maxKillTimeout, + ExecutorPid: h.executorPid, + AllocDir: h.allocDir, + } + data, err := json.Marshal(pid) + if err != nil { + h.logger.Printf("[ERR] driver.rkt: failed to marshal rkt PID to JSON: %s", err) + } + return fmt.Sprintf("Rkt:%s", string(data)) +} + +func (h *rktHandle) WaitCh() chan *dstructs.WaitResult { + return h.waitCh +} + +func (h *rktHandle) Update(task *structs.Task) error { + // Store the updated kill timeout. + h.killTimeout = GetKillTimeout(task.KillTimeout, h.maxKillTimeout) + h.executor.UpdateTask(task) + + // Update is not possible + return nil +} + +// Kill is used to terminate the task. We send an Interrupt +// and then provide a 5 second grace period before doing a Kill. +func (h *rktHandle) Kill() error { + h.executor.ShutDown() + select { + case <-h.doneCh: + return nil + case <-time.After(h.killTimeout): + return h.executor.Exit() + } +} + +func (h *rktHandle) Stats() (*cstructs.TaskResourceUsage, error) { + return nil, fmt.Errorf("stats not implemented for rkt") +} + +func (h *rktHandle) run() { + ps, err := h.executor.Wait() + close(h.doneCh) + if ps.ExitCode == 0 && err != nil { + if e := killProcess(h.executorPid); e != nil { + h.logger.Printf("[ERROR] driver.rkt: error killing user process: %v", e) + } + if e := h.allocDir.UnmountAll(); e != nil { + h.logger.Printf("[ERROR] driver.rkt: unmounting dev,proc and alloc dirs failed: %v", e) + } + } + h.waitCh <- dstructs.NewWaitResult(ps.ExitCode, 0, err) + close(h.waitCh) + // Remove services + if err := h.executor.DeregisterServices(); err != nil { + h.logger.Printf("[ERR] driver.rkt: failed to deregister services: %v", err) + } + + if err := h.executor.Exit(); err != nil { + h.logger.Printf("[ERR] driver.rkt: error killing executor: %v", err) + } + h.pluginClient.Kill() +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/structs/structs.go b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs.go new file mode 100644 index 000000000..7714d5ac5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs.go @@ -0,0 +1,77 @@ +package structs + +import ( + "fmt" + "time" +) + +const ( + // The default user that the executor uses to run tasks + DefaultUnpriviledgedUser = "nobody" + + // CheckBufSize is the size of the check output result + CheckBufSize = 4 * 1024 +) + +// WaitResult stores the result of a Wait operation. 
+type WaitResult struct { + ExitCode int + Signal int + Err error +} + +func NewWaitResult(code, signal int, err error) *WaitResult { + return &WaitResult{ + ExitCode: code, + Signal: signal, + Err: err, + } +} + +func (r *WaitResult) Successful() bool { + return r.ExitCode == 0 && r.Signal == 0 && r.Err == nil +} + +func (r *WaitResult) String() string { + return fmt.Sprintf("Wait returned exit code %v, signal %v, and error %v", + r.ExitCode, r.Signal, r.Err) +} + +// RecoverableError wraps an error and marks whether it is recoverable and can +// be retried, or whether it is fatal. +type RecoverableError struct { + Err error + Recoverable bool +} + +// NewRecoverableError is used to wrap an error and mark it as recoverable or +// not. +func NewRecoverableError(e error, recoverable bool) *RecoverableError { + return &RecoverableError{ + Err: e, + Recoverable: recoverable, + } +} + +func (r *RecoverableError) Error() string { + return r.Err.Error() +} + +// CheckResult encapsulates the result of a check +type CheckResult struct { + + // ExitCode is the exit code of the check + ExitCode int + + // Output is the output of the check script + Output string + + // Timestamp is the time at which the check was executed + Timestamp time.Time + + // Duration is the time it took the check to run + Duration time.Duration + + // Err is the error that a check returned + Err error +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_default.go b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_default.go new file mode 100644 index 000000000..c14afe407 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_default.go @@ -0,0 +1,12 @@ +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package structs + +// IsolationConfig has information about the isolation mechanism the executor +// uses to put resource constraints and isolation on the user process. The +// default implementation is empty. Platforms that support resource isolation +// (e.g. Linux's Cgroups) should build their own platform-specific copy. This +// information is transmitted via RPC so it is not permissible to change the +// API.
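+ // (Editorial note: the build tag above covers the platforms without cgroup support, hence the empty struct; the Linux variant in structs_linux.go carries the cgroup configuration and paths.)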
+type IsolationConfig struct { +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_linux.go b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_linux.go new file mode 100644 index 000000000..48f32f0be --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/structs/structs_linux.go @@ -0,0 +1,10 @@ +package structs + +import cgroupConfig "github.com/opencontainers/runc/libcontainer/configs" + +// IsolationConfig has information about the isolation mechanism the executor +// uses to put resource constraints and isolation on the user process +type IsolationConfig struct { + Cgroup *cgroupConfig.Cgroup + CgroupPaths map[string]string +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/syslog_plugin.go b/vendor/github.com/hashicorp/nomad/client/driver/syslog_plugin.go new file mode 100644 index 000000000..55237cd2d --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/syslog_plugin.go @@ -0,0 +1,69 @@ +package driver + +import ( + "log" + "net/rpc" + + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/driver/logging" + "github.com/hashicorp/nomad/nomad/structs" +) + +type SyslogCollectorRPC struct { + client *rpc.Client +} + +type LaunchCollectorArgs struct { + Ctx *logging.LogCollectorContext +} + +func (e *SyslogCollectorRPC) LaunchCollector(ctx *logging.LogCollectorContext) (*logging.SyslogCollectorState, error) { + var ss *logging.SyslogCollectorState + err := e.client.Call("Plugin.LaunchCollector", LaunchCollectorArgs{Ctx: ctx}, &ss) + return ss, err +} + +func (e *SyslogCollectorRPC) Exit() error { + return e.client.Call("Plugin.Exit", new(interface{}), new(interface{})) +} + +func (e *SyslogCollectorRPC) UpdateLogConfig(logConfig *structs.LogConfig) error { + return e.client.Call("Plugin.UpdateLogConfig", logConfig, new(interface{})) +} + +type SyslogCollectorRPCServer struct { + Impl logging.LogCollector +} + +func (s *SyslogCollectorRPCServer) LaunchCollector(args LaunchCollectorArgs, + resp *logging.SyslogCollectorState) error { + ss, err := s.Impl.LaunchCollector(args.Ctx) + if ss != nil { + *resp = *ss + } + return err +} + +func (s *SyslogCollectorRPCServer) Exit(args interface{}, resp *interface{}) error { + return s.Impl.Exit() +} + +func (s *SyslogCollectorRPCServer) UpdateLogConfig(logConfig *structs.LogConfig, resp *interface{}) error { + return s.Impl.UpdateLogConfig(logConfig) +} + +type SyslogCollectorPlugin struct { + logger *log.Logger + Impl *SyslogCollectorRPCServer +} + +func (p *SyslogCollectorPlugin) Server(*plugin.MuxBroker) (interface{}, error) { + if p.Impl == nil { + p.Impl = &SyslogCollectorRPCServer{Impl: logging.NewSyslogCollector(p.logger)} + } + return p.Impl, nil +} + +func (p *SyslogCollectorPlugin) Client(b *plugin.MuxBroker, c *rpc.Client) (interface{}, error) { + return &SyslogCollectorRPC{client: c}, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/utils.go b/vendor/github.com/hashicorp/nomad/client/driver/utils.go new file mode 100644 index 000000000..562e3165e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/utils.go @@ -0,0 +1,170 @@ +package driver + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "github.com/hashicorp/go-multierror" + "github.com/hashicorp/go-plugin" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/client/driver/executor" + "github.com/hashicorp/nomad/client/driver/logging" + cstructs 
"github.com/hashicorp/nomad/client/driver/structs" + "github.com/hashicorp/nomad/nomad/structs" +) + +// createExecutor launches an executor plugin and returns an instance of the +// Executor interface +func createExecutor(config *plugin.ClientConfig, w io.Writer, + clientConfig *config.Config) (executor.Executor, *plugin.Client, error) { + config.HandshakeConfig = HandshakeConfig + config.Plugins = GetPluginMap(w) + config.MaxPort = clientConfig.ClientMaxPort + config.MinPort = clientConfig.ClientMinPort + + // setting the setsid of the plugin process so that it doesn't get signals sent to + // the nomad client. + if config.Cmd != nil { + isolateCommand(config.Cmd) + } + + executorClient := plugin.NewClient(config) + rpcClient, err := executorClient.Client() + if err != nil { + return nil, nil, fmt.Errorf("error creating rpc client for executor plugin: %v", err) + } + + raw, err := rpcClient.Dispense("executor") + if err != nil { + return nil, nil, fmt.Errorf("unable to dispense the executor plugin: %v", err) + } + executorPlugin := raw.(executor.Executor) + return executorPlugin, executorClient, nil +} + +func createLogCollector(config *plugin.ClientConfig, w io.Writer, + clientConfig *config.Config) (logging.LogCollector, *plugin.Client, error) { + config.HandshakeConfig = HandshakeConfig + config.Plugins = GetPluginMap(w) + config.MaxPort = clientConfig.ClientMaxPort + config.MinPort = clientConfig.ClientMinPort + if config.Cmd != nil { + isolateCommand(config.Cmd) + } + + syslogClient := plugin.NewClient(config) + rpcCLient, err := syslogClient.Client() + if err != nil { + return nil, nil, fmt.Errorf("error creating rpc client for syslog plugin: %v", err) + } + + raw, err := rpcCLient.Dispense("syslogcollector") + if err != nil { + return nil, nil, fmt.Errorf("unable to dispense the syslog plugin: %v", err) + } + logCollector := raw.(logging.LogCollector) + return logCollector, syslogClient, nil +} + +func consulContext(clientConfig *config.Config, containerID string) *executor.ConsulContext { + return &executor.ConsulContext{ + ConsulConfig: clientConfig.ConsulConfig, + ContainerID: containerID, + DockerEndpoint: clientConfig.Read("docker.endpoint"), + TLSCa: clientConfig.Read("docker.tls.ca"), + TLSCert: clientConfig.Read("docker.tls.cert"), + TLSKey: clientConfig.Read("docker.tls.key"), + } +} + +// killProcess kills a process with the given pid +func killProcess(pid int) error { + proc, err := os.FindProcess(pid) + if err != nil { + return err + } + return proc.Kill() +} + +// destroyPlugin kills the plugin with the given pid and also kills the user +// process +func destroyPlugin(pluginPid int, userPid int) error { + var merr error + if err := killProcess(pluginPid); err != nil { + merr = multierror.Append(merr, err) + } + + if err := killProcess(userPid); err != nil { + merr = multierror.Append(merr, err) + } + return merr +} + +// validateCommand validates that the command only has a single value and +// returns a user friendly error message telling them to use the passed +// argField. +func validateCommand(command, argField string) error { + trimmed := strings.TrimSpace(command) + if len(trimmed) == 0 { + return fmt.Errorf("command empty: %q", command) + } + + if len(trimmed) != len(command) { + return fmt.Errorf("command contains extra white space: %q", command) + } + + split := strings.Split(trimmed, " ") + if len(split) != 1 { + return fmt.Errorf("command contained more than one input. 
Use %q field to pass arguments", argField) + } + + return nil +} + +// GetKillTimeout returns the kill timeout to use given the tasks desired kill +// timeout and the operator configured max kill timeout. +func GetKillTimeout(desired, max time.Duration) time.Duration { + maxNanos := max.Nanoseconds() + desiredNanos := desired.Nanoseconds() + + // Make the minimum time between signal and kill, 1 second. + if desiredNanos <= 0 { + desiredNanos = (1 * time.Second).Nanoseconds() + } + + // Protect against max not being set properly. + if maxNanos <= 0 { + maxNanos = (10 * time.Second).Nanoseconds() + } + + if desiredNanos < maxNanos { + return time.Duration(desiredNanos) + } + + return max +} + +// GetAbsolutePath returns the absolute path of the passed binary by resolving +// it in the path and following symlinks. +func GetAbsolutePath(bin string) (string, error) { + lp, err := exec.LookPath(bin) + if err != nil { + return "", fmt.Errorf("failed to resolve path to %q executable: %v", bin, err) + } + + return filepath.EvalSymlinks(lp) +} + +// getExecutorUser returns the user of the task, defaulting to +// cstructs.DefaultUnprivilegedUser if none was given. +func getExecutorUser(task *structs.Task) string { + if task.User == "" { + return cstructs.DefaultUnpriviledgedUser + } + return task.User +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/utils_unix.go b/vendor/github.com/hashicorp/nomad/client/driver/utils_unix.go new file mode 100644 index 000000000..474cdcf17 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/utils_unix.go @@ -0,0 +1,18 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package driver + +import ( + "os/exec" + "syscall" +) + +// isolateCommand sets the setsid flag in exec.Cmd to true so that the process +// becomes the process leader in a new session and doesn't receive signals that +// are sent to the parent process. 
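+ // (Editorial note: with Setsid the executor runs in its own session and process group, so, for example, an interrupt delivered to the agent's controlling terminal does not also stop running executors.)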
+func isolateCommand(cmd *exec.Cmd) { + if cmd.SysProcAttr == nil { + cmd.SysProcAttr = &syscall.SysProcAttr{} + } + cmd.SysProcAttr.Setsid = true +} diff --git a/vendor/github.com/hashicorp/nomad/client/driver/utils_windows.go b/vendor/github.com/hashicorp/nomad/client/driver/utils_windows.go new file mode 100644 index 000000000..5b2b7d842 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/driver/utils_windows.go @@ -0,0 +1,9 @@ +package driver + +import ( + "os/exec" +) + +// TODO Figure out if this is needed in Windows +func isolateCommand(cmd *exec.Cmd) { +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/arch.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/arch.go new file mode 100644 index 000000000..16d8c99a8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/arch.go @@ -0,0 +1,26 @@ +package fingerprint + +import ( + "log" + "runtime" + + client "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// ArchFingerprint is used to fingerprint the architecture +type ArchFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewArchFingerprint is used to create an architecture fingerprint +func NewArchFingerprint(logger *log.Logger) Fingerprint { + f := &ArchFingerprint{logger: logger} + return f +} + +func (f *ArchFingerprint) Fingerprint(config *client.Config, node *structs.Node) (bool, error) { + node.Attributes["arch"] = runtime.GOARCH + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup.go new file mode 100644 index 000000000..1ec8d8793 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup.go @@ -0,0 +1,59 @@ +// +build linux + +package fingerprint + +import ( + "log" + "time" + + "github.com/hashicorp/nomad/nomad/structs" +) + +const ( + cgroupAvailable = "available" + cgroupUnavailable = "unavailable" + interval = 15 +) + +type CGroupFingerprint struct { + logger *log.Logger + lastState string + mountPointDetector MountPointDetector +} + +// An interface to isolate calls to the cgroup library. +// This facilitates testing where we can implement +// fake mount points to test various code paths +type MountPointDetector interface { + MountPoint() (string, error) +} + +// Implements the interface detector which calls the cgroups library directly +type DefaultMountPointDetector struct { +} + +// Call out to the default cgroup library +func (b *DefaultMountPointDetector) MountPoint() (string, error) { + return FindCgroupMountpointDir() +} + +// NewCGroupFingerprint returns a new cgroup fingerprinter +func NewCGroupFingerprint(logger *log.Logger) Fingerprint { + f := &CGroupFingerprint{ + logger: logger, + lastState: cgroupUnavailable, + mountPointDetector: &DefaultMountPointDetector{}, + } + return f +} + +// clearCGroupAttributes clears any node attributes related to cgroups that might +// have been set in a previous fingerprint run. +func (f *CGroupFingerprint) clearCGroupAttributes(n *structs.Node) { + delete(n.Attributes, "unique.cgroup.mountpoint") +} + +// Periodic determines the interval at which the periodic fingerprinter will run.
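+ // (Editorial note: with the interval constant of 15 defined above, this fingerprinter re-runs every 15 seconds.)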
+func (f *CGroupFingerprint) Periodic() (bool, time.Duration) { + return true, interval * time.Second +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup_linux.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup_linux.go new file mode 100644 index 000000000..9abb959b5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/cgroup_linux.go @@ -0,0 +1,57 @@ +// +build linux + +package fingerprint + +import ( + "fmt" + + client "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/opencontainers/runc/libcontainer/cgroups" +) + +// FindCgroupMountpointDir is used to find the cgroup mount point on a Linux +// system. +func FindCgroupMountpointDir() (string, error) { + mount, err := cgroups.FindCgroupMountpointDir() + if err != nil { + switch e := err.(type) { + case *cgroups.NotFoundError: + // It's okay if the mount point is not discovered + return "", nil + default: + // All other errors are passed back as is + return "", e + } + } + return mount, nil +} + +// Fingerprint tries to find a valid cgroup mount point +func (f *CGroupFingerprint) Fingerprint(cfg *client.Config, node *structs.Node) (bool, error) { + mount, err := f.mountPointDetector.MountPoint() + if err != nil { + f.clearCGroupAttributes(node) + return false, fmt.Errorf("Failed to discover cgroup mount point: %s", err) + } + + // Check if a cgroup mount point was found + if mount == "" { + // Clear any attributes from the previous fingerprint. + f.clearCGroupAttributes(node) + + if f.lastState == cgroupAvailable { + f.logger.Printf("[INFO] fingerprint.cgroups: cgroups are unavailable") + } + f.lastState = cgroupUnavailable + return true, nil + } + + node.Attributes["unique.cgroup.mountpoint"] = mount + + if f.lastState == cgroupUnavailable { + f.logger.Printf("[INFO] fingerprint.cgroups: cgroups are available") + } + f.lastState = cgroupAvailable + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/consul.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/consul.go new file mode 100644 index 000000000..149324601 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/consul.go @@ -0,0 +1,100 @@ +package fingerprint + +import ( + "fmt" + "log" + "strconv" + "time" + + consul "github.com/hashicorp/consul/api" + + client "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +const ( + consulAvailable = "available" + consulUnavailable = "unavailable" +) + +// ConsulFingerprint is used to fingerprint the availability of a local Consul agent +type ConsulFingerprint struct { + logger *log.Logger + client *consul.Client + lastState string +} + +// NewConsulFingerprint is used to create a Consul fingerprint +func NewConsulFingerprint(logger *log.Logger) Fingerprint { + return &ConsulFingerprint{logger: logger, lastState: consulUnavailable} +} + +func (f *ConsulFingerprint) Fingerprint(config *client.Config, node *structs.Node) (bool, error) { + // Guard against uninitialized Links + if node.Links == nil { + node.Links = map[string]string{} + } + + // Only create the client once to avoid creating too many connections to + // Consul.
+ if f.client == nil { + consulConfig, err := config.ConsulConfig.ApiConfig() + if err != nil { + return false, fmt.Errorf("Failed to initialize the Consul client config: %v", err) + } + + f.client, err = consul.NewClient(consulConfig) + if err != nil { + return false, fmt.Errorf("Failed to initialize consul client: %s", err) + } + } + + // We'll try to detect consul by making a query to the agent's self API. + // If we can't hit this URL, consul is probably not running on this machine. + info, err := f.client.Agent().Self() + if err != nil { + // Clear any attributes set by a previous fingerprint. + f.clearConsulAttributes(node) + + // Print a message indicating that the Consul Agent is not available + // anymore + if f.lastState == consulAvailable { + f.logger.Printf("[INFO] fingerprint.consul: consul agent is unavailable") + } + f.lastState = consulUnavailable + return false, nil + } + + node.Attributes["consul.server"] = strconv.FormatBool(info["Config"]["Server"].(bool)) + node.Attributes["consul.version"] = info["Config"]["Version"].(string) + node.Attributes["consul.revision"] = info["Config"]["Revision"].(string) + node.Attributes["unique.consul.name"] = info["Config"]["NodeName"].(string) + node.Attributes["consul.datacenter"] = info["Config"]["Datacenter"].(string) + + node.Links["consul"] = fmt.Sprintf("%s.%s", + node.Attributes["consul.datacenter"], + node.Attributes["unique.consul.name"]) + + // If the Consul Agent was previously unavailable print a message to + // indicate the Agent is available now + if f.lastState == consulUnavailable { + f.logger.Printf("[INFO] fingerprint.consul: consul agent is available") + } + f.lastState = consulAvailable + return true, nil +} + +// clearConsulAttributes removes consul attributes and links from the passed +// Node.
+func (f *ConsulFingerprint) clearConsulAttributes(n *structs.Node) { + delete(n.Attributes, "consul.server") + delete(n.Attributes, "consul.version") + delete(n.Attributes, "consul.revision") + delete(n.Attributes, "unique.consul.name") + delete(n.Attributes, "consul.datacenter") + delete(n.Links, "consul") +} + +func (f *ConsulFingerprint) Periodic() (bool, time.Duration) { + return true, 15 * time.Second +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/cpu.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/cpu.go new file mode 100644 index 000000000..00f9b848c --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/cpu.go @@ -0,0 +1,52 @@ +package fingerprint + +import ( + "fmt" + "log" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/helper/stats" + "github.com/hashicorp/nomad/nomad/structs" +) + +// CPUFingerprint is used to fingerprint the CPU +type CPUFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewCPUFingerprint is used to create a CPU fingerprint +func NewCPUFingerprint(logger *log.Logger) Fingerprint { + f := &CPUFingerprint{logger: logger} + return f +} + +func (f *CPUFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + if err := stats.Init(); err != nil { + return false, fmt.Errorf("Unable to obtain CPU information: %v", err) + } + + modelName := stats.CPUModelName() + if modelName != "" { + node.Attributes["cpu.modelname"] = modelName + } + + mhz := stats.CPUMHzPerCore() + node.Attributes["cpu.frequency"] = fmt.Sprintf("%.0f", mhz) + f.logger.Printf("[DEBUG] fingerprint.cpu: frequency: %.0f MHz", mhz) + + numCores := stats.CPUNumCores() + node.Attributes["cpu.numcores"] = fmt.Sprintf("%d", numCores) + f.logger.Printf("[DEBUG] fingerprint.cpu: core count: %d", numCores) + + tt := stats.TotalTicksAvailable() + node.Attributes["cpu.totalcompute"] = fmt.Sprintf("%.0f", tt) + + if node.Resources == nil { + node.Resources = &structs.Resources{} + } + + node.Resources.CPU = int(tt) + + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/env_aws.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/env_aws.go new file mode 100644 index 000000000..53a32ec8f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/env_aws.go @@ -0,0 +1,250 @@ +package fingerprint + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "regexp" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// This is where the AWS metadata server normally resides. We hardcode the +// "instance" path as well since it's the only one we access here. 
+const DEFAULT_AWS_URL = "http://169.254.169.254/latest/meta-data/" + +// map of instance type to approximate speed, in Mbits/s +// http://serverfault.com/questions/324883/aws-bandwidth-and-content-delivery/326797#326797 +// which itself cites these sources: +// - http://blog.rightscale.com/2007/10/28/network-performance-within-amazon-ec2-and-to-amazon-s3/ +// - http://www.soc.napier.ac.uk/~bill/chris_p.pdf +// +// This data is meant for a loose approximation +var ec2InstanceSpeedMap = map[string]int{ + "m4.large": 80, + "m3.medium": 80, + "m3.large": 80, + "c4.large": 80, + "c3.large": 80, + "c3.xlarge": 80, + "r3.large": 80, + "r3.xlarge": 80, + "i2.xlarge": 80, + "d2.xlarge": 80, + "t2.micro": 16, + "t2.small": 16, + "t2.medium": 16, + "t2.large": 16, + "m4.xlarge": 760, + "m4.2xlarge": 760, + "m4.4xlarge": 760, + "m3.xlarge": 760, + "m3.2xlarge": 760, + "c4.xlarge": 760, + "c4.2xlarge": 760, + "c4.4xlarge": 760, + "c3.2xlarge": 760, + "c3.4xlarge": 760, + "g2.2xlarge": 760, + "r3.2xlarge": 760, + "r3.4xlarge": 760, + "i2.2xlarge": 760, + "i2.4xlarge": 760, + "d2.2xlarge": 760, + "d2.4xlarge": 760, + "m4.10xlarge": 10000, + "c4.8xlarge": 10000, + "c3.8xlarge": 10000, + "g2.8xlarge": 10000, + "r3.8xlarge": 10000, + "i2.8xlarge": 10000, + "d2.8xlarge": 10000, +} + +// EnvAWSFingerprint is used to fingerprint AWS metadata +type EnvAWSFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewEnvAWSFingerprint is used to create a fingerprint from AWS metadata +func NewEnvAWSFingerprint(logger *log.Logger) Fingerprint { + f := &EnvAWSFingerprint{logger: logger} + return f +} + +func (f *EnvAWSFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + if !f.isAWS() { + return false, nil + } + + // newNetwork is populated and added to the Node's resources + newNetwork := &structs.NetworkResource{ + Device: "eth0", + } + + if node.Links == nil { + node.Links = make(map[string]string) + } + metadataURL := os.Getenv("AWS_ENV_URL") + if metadataURL == "" { + metadataURL = DEFAULT_AWS_URL + } + + // assume 2 seconds is enough time for inside AWS network + client := &http.Client{ + Timeout: 2 * time.Second, + Transport: cleanhttp.DefaultTransport(), + } + + // Keys and whether they should be namespaced as unique. Any key whose value + // uniquely identifies a node, such as ip, should be marked as unique. When + // marked as unique, the key isn't included in the computed node class. + keys := map[string]bool{ + "ami-id": true, + "hostname": true, + "instance-id": true, + "instance-type": false, + "local-hostname": true, + "local-ipv4": true, + "public-hostname": true, + "public-ipv4": true, + "placement/availability-zone": false, + } + for k, unique := range keys { + res, err := client.Get(metadataURL + k) + if err != nil { + // if it's a URL error, assume we're not in an AWS environment + // TODO: better way to detect AWS? Check xen virtualization? + if _, ok := err.(*url.Error); ok { + return false, nil + } + // not sure what other errors it would return + return false, err + } + if res.StatusCode != http.StatusOK { + f.logger.Printf("[WARN]: fingerprint.env_aws: Could not read value for attribute %q", k) + continue + } + resp, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for AWS %s", k) + } + + // assume we want blank entries + key := "platform.aws."
+ strings.Replace(k, "/", ".", -1) + if unique { + key = structs.UniqueNamespace(key) + } + + node.Attributes[key] = strings.Trim(string(resp), "\n") + } + + // copy over network specific information + if val := node.Attributes["unique.platform.aws.local-ipv4"]; val != "" { + node.Attributes["unique.network.ip-address"] = val + newNetwork.IP = val + newNetwork.CIDR = newNetwork.IP + "/32" + } + + // find LinkSpeed from lookup + if throughput := f.linkSpeed(); throughput > 0 { + newNetwork.MBits = throughput + } + + if node.Resources == nil { + node.Resources = &structs.Resources{} + } + node.Resources.Networks = append(node.Resources.Networks, newNetwork) + + // populate Node Network Resources + + // populate Links + node.Links["aws.ec2"] = fmt.Sprintf("%s.%s", + node.Attributes["platform.aws.placement.availability-zone"], + node.Attributes["unique.platform.aws.instance-id"]) + + return true, nil +} + +func (f *EnvAWSFingerprint) isAWS() bool { + // Read the internal metadata URL from the environment, allowing test files to + // provide their own + metadataURL := os.Getenv("AWS_ENV_URL") + if metadataURL == "" { + metadataURL = DEFAULT_AWS_URL + } + + // assume 2 seconds is enough time for inside AWS network + client := &http.Client{ + Timeout: 2 * time.Second, + Transport: cleanhttp.DefaultTransport(), + } + + // Query the metadata url for the ami-id, to veryify we're on AWS + resp, err := client.Get(metadataURL + "ami-id") + if err != nil { + f.logger.Printf("[DEBUG] fingerprint.env_aws: Error querying AWS Metadata URL, skipping") + return false + } + defer resp.Body.Close() + + if resp.StatusCode >= 400 { + // URL not found, which indicates that this isn't AWS + return false + } + + instanceID, err := ioutil.ReadAll(resp.Body) + if err != nil { + f.logger.Printf("[DEBUG] fingerprint.env_aws: Error reading AWS Instance ID, skipping") + return false + } + + match, err := regexp.MatchString("ami-*", string(instanceID)) + if err != nil || !match { + return false + } + + return true +} + +// EnvAWSFingerprint uses lookup table to approximate network speeds +func (f *EnvAWSFingerprint) linkSpeed() int { + + // Query the API for the instance type, and use the table above to approximate + // the network speed + metadataURL := os.Getenv("AWS_ENV_URL") + if metadataURL == "" { + metadataURL = DEFAULT_AWS_URL + } + + // assume 2 seconds is enough time for inside AWS network + client := &http.Client{ + Timeout: 2 * time.Second, + Transport: cleanhttp.DefaultTransport(), + } + + res, err := client.Get(metadataURL + "instance-type") + body, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + f.logger.Printf("[ERR]: fingerprint.env_aws: Error reading response body for instance-type") + return 0 + } + + key := strings.Trim(string(body), "\n") + v, ok := ec2InstanceSpeedMap[key] + if !ok { + return 0 + } + + return v +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/env_gce.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/env_gce.go new file mode 100644 index 000000000..6f83ed224 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/env_gce.go @@ -0,0 +1,270 @@ +package fingerprint + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "os" + "regexp" + "strconv" + "strings" + "time" + + "github.com/hashicorp/go-cleanhttp" + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// This is where the GCE metadata server normally resides. 
We hardcode the +// "instance" path as well since it's the only one we access here. +const DEFAULT_GCE_URL = "http://169.254.169.254/computeMetadata/v1/instance/" + +type GCEMetadataNetworkInterface struct { + AccessConfigs []struct { + ExternalIp string + Type string + } + ForwardedIps []string + Ip string + Network string +} + +type ReqError struct { + StatusCode int +} + +func (e ReqError) Error() string { + return http.StatusText(e.StatusCode) +} + +func lastToken(s string) string { + index := strings.LastIndex(s, "/") + return s[index+1:] +} + +// EnvGCEFingerprint is used to fingerprint GCE metadata +type EnvGCEFingerprint struct { + StaticFingerprinter + client *http.Client + logger *log.Logger + metadataURL string +} + +// NewEnvGCEFingerprint is used to create a fingerprint from GCE metadata +func NewEnvGCEFingerprint(logger *log.Logger) Fingerprint { + // Read the internal metadata URL from the environment, allowing test files to + // provide their own + metadataURL := os.Getenv("GCE_ENV_URL") + if metadataURL == "" { + metadataURL = DEFAULT_GCE_URL + } + + // assume 2 seconds is enough time for inside GCE network + client := &http.Client{ + Timeout: 2 * time.Second, + Transport: cleanhttp.DefaultTransport(), + } + + return &EnvGCEFingerprint{ + client: client, + logger: logger, + metadataURL: metadataURL, + } +} + +func (f *EnvGCEFingerprint) Get(attribute string, recursive bool) (string, error) { + reqUrl := f.metadataURL + attribute + if recursive { + reqUrl = reqUrl + "?recursive=true" + } + + parsedUrl, err := url.Parse(reqUrl) + if err != nil { + return "", err + } + + req := &http.Request{ + Method: "GET", + URL: parsedUrl, + Header: http.Header{ + "Metadata-Flavor": []string{"Google"}, + }, + } + + res, err := f.client.Do(req) + if err != nil || res.StatusCode != http.StatusOK { + f.logger.Printf("[DEBUG] fingerprint.env_gce: Could not read value for attribute %q", attribute) + return "", err + } + + resp, err := ioutil.ReadAll(res.Body) + res.Body.Close() + if err != nil { + f.logger.Printf("[ERR] fingerprint.env_gce: Error reading response body for GCE %s", attribute) + return "", err + } + + if res.StatusCode >= 400 { + return "", ReqError{res.StatusCode} + } + + return string(resp), nil +} + +func checkError(err error, logger *log.Logger, desc string) error { + // If it's a URL error, assume we're not actually in an GCE environment. + // To the outer layers, this isn't an error so return nil. + if _, ok := err.(*url.Error); ok { + logger.Printf("[DEBUG] fingerprint.env_gce: Error querying GCE " + desc + ", skipping") + return nil + } + // Otherwise pass the error through. + return err +} + +func (f *EnvGCEFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + if !f.isGCE() { + return false, nil + } + + if node.Links == nil { + node.Links = make(map[string]string) + } + + // Keys and whether they should be namespaced as unique. Any key whose value + // uniquely identifies a node, such as ip, should be marked as unique. When + // marked as unique, the key isn't included in the computed node class. + keys := map[string]bool{ + "hostname": true, + "id": true, + "cpu-platform": false, + "scheduling/automatic-restart": false, + "scheduling/on-host-maintenance": false, + } + + for k, unique := range keys { + value, err := f.Get(k, false) + if err != nil { + return false, checkError(err, f.logger, k) + } + + // assume we want blank entries + key := "platform.gce." 
+ strings.Replace(k, "/", ".", -1) + if unique { + key = structs.UniqueNamespace(key) + } + node.Attributes[key] = strings.Trim(string(value), "\n") + } + + // These keys need everything before the final slash removed to be usable. + keys = map[string]bool{ + "machine-type": false, + "zone": false, + } + for k, unique := range keys { + value, err := f.Get(k, false) + if err != nil { + return false, checkError(err, f.logger, k) + } + + key := "platform.gce." + k + if unique { + key = structs.UniqueNamespace(key) + } + node.Attributes[key] = strings.Trim(lastToken(value), "\n") + } + + // Get internal and external IPs (if they exist) + value, err := f.Get("network-interfaces/", true) + var interfaces []GCEMetadataNetworkInterface + if err := json.Unmarshal([]byte(value), &interfaces); err != nil { + f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding network interface information: %s", err.Error()) + } + + for _, intf := range interfaces { + prefix := "platform.gce.network." + lastToken(intf.Network) + uniquePrefix := "unique." + prefix + node.Attributes[prefix] = "true" + node.Attributes[uniquePrefix+".ip"] = strings.Trim(intf.Ip, "\n") + for index, accessConfig := range intf.AccessConfigs { + node.Attributes[uniquePrefix+".external-ip."+strconv.Itoa(index)] = accessConfig.ExternalIp + } + } + + var tagList []string + value, err = f.Get("tags", false) + if err != nil { + return false, checkError(err, f.logger, "tags") + } + if err := json.Unmarshal([]byte(value), &tagList); err != nil { + f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance tags: %s", err.Error()) + } + for _, tag := range tagList { + attr := "platform.gce.tag." + var key string + + // If the tag is namespaced as unique, we strip it from the tag and + // prepend to the whole attribute. + if structs.IsUniqueNamespace(tag) { + tag = strings.TrimPrefix(tag, structs.NodeUniqueNamespace) + key = fmt.Sprintf("%s%s%s", structs.NodeUniqueNamespace, attr, tag) + } else { + key = fmt.Sprintf("%s%s", attr, tag) + } + + node.Attributes[key] = "true" + } + + var attrDict map[string]string + value, err = f.Get("attributes/", true) + if err != nil { + return false, checkError(err, f.logger, "attributes/") + } + if err := json.Unmarshal([]byte(value), &attrDict); err != nil { + f.logger.Printf("[WARN] fingerprint.env_gce: Error decoding instance attributes: %s", err.Error()) + } + for k, v := range attrDict { + attr := "platform.gce.attr." + var key string + + // If the key is namespaced as unique, we strip it from the + // key and prepend to the whole attribute. + if structs.IsUniqueNamespace(k) { + k = strings.TrimPrefix(k, structs.NodeUniqueNamespace) + key = fmt.Sprintf("%s%s%s", structs.NodeUniqueNamespace, attr, k) + } else { + key = fmt.Sprintf("%s%s", attr, k) + } + + node.Attributes[key] = strings.Trim(v, "\n") + } + + // populate Links + node.Links["gce"] = node.Attributes["unique.platform.gce.id"] + + return true, nil +} + +func (f *EnvGCEFingerprint) isGCE() bool { + // TODO: better way to detect GCE? + + // Query the metadata url for the machine type, to verify we're on GCE + machineType, err := f.Get("machine-type", false) + if err != nil { + if re, ok := err.(ReqError); !ok || re.StatusCode != 404 { + // If it wasn't a 404 error, print an error message. 
+ f.logger.Printf("[DEBUG] fingerprint.env_gce: Error querying GCE Metadata URL, skipping") + } + return false + } + + match, err := regexp.MatchString("projects/.+/machineTypes/.+", machineType) + if err != nil || !match { + return false + } + + return true +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint.go new file mode 100644 index 000000000..234cf9430 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint.go @@ -0,0 +1,87 @@ +package fingerprint + +import ( + "fmt" + "log" + "sort" + "time" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// EmptyDuration is to be used by fingerprinters that are not periodic. +const ( + EmptyDuration = time.Duration(0) +) + +func init() { + builtinFingerprintMap["arch"] = NewArchFingerprint + builtinFingerprintMap["cpu"] = NewCPUFingerprint + builtinFingerprintMap["env_aws"] = NewEnvAWSFingerprint + builtinFingerprintMap["env_gce"] = NewEnvGCEFingerprint + builtinFingerprintMap["host"] = NewHostFingerprint + builtinFingerprintMap["memory"] = NewMemoryFingerprint + builtinFingerprintMap["network"] = NewNetworkFingerprint + builtinFingerprintMap["nomad"] = NewNomadFingerprint + builtinFingerprintMap["storage"] = NewStorageFingerprint + + // Initialize the list of available fingerprinters per platform. Each + // platform defines its own list of available fingerprinters. + initPlatformFingerprints(builtinFingerprintMap) +} + +// builtinFingerprintMap contains the built in registered fingerprints which are +// available for a given platform. +var builtinFingerprintMap = make(map[string]Factory, 16) + +// BuiltinFingerprints is a slice containing the key names of all registered +// fingerprints available, to provided an ordered iteration +func BuiltinFingerprints() []string { + fingerprints := make([]string, 0, len(builtinFingerprintMap)) + for k := range builtinFingerprintMap { + fingerprints = append(fingerprints, k) + } + sort.Strings(fingerprints) + return fingerprints +} + +// NewFingerprint is used to instantiate and return a new fingerprint +// given the name and a logger +func NewFingerprint(name string, logger *log.Logger) (Fingerprint, error) { + // Lookup the factory function + factory, ok := builtinFingerprintMap[name] + if !ok { + return nil, fmt.Errorf("unknown fingerprint '%s'", name) + } + + // Instantiate the fingerprint + f := factory(logger) + return f, nil +} + +// Factory is used to instantiate a new Fingerprint +type Factory func(*log.Logger) Fingerprint + +// Fingerprint is used for doing "fingerprinting" of the +// host to automatically determine attributes, resources, +// and metadata about it. Each of these is a heuristic, and +// many of them can be applied on a particular host. +type Fingerprint interface { + // Fingerprint is used to update properties of the Node, + // and returns if the fingerprint was applicable and a potential error. + Fingerprint(*config.Config, *structs.Node) (bool, error) + + // Periodic is a mechanism for the fingerprinter to indicate that it should + // be run periodically. The return value is a boolean indicating if it + // should be periodic, and if true, a duration. + Periodic() (bool, time.Duration) +} + +// StaticFingerprinter can be embedded in a struct that has a Fingerprint method +// to make it non-periodic. 
+type StaticFingerprinter struct{} + +func (s *StaticFingerprinter) Periodic() (bool, time.Duration) { + return false, EmptyDuration +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_default.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_default.go new file mode 100644 index 000000000..e2ae1ec6f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_default.go @@ -0,0 +1,6 @@ +// +build darwin dragonfly freebsd netbsd openbsd solaris windows + +package fingerprint + +func initPlatformFingerprints(fps map[string]Factory) { +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_linux.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_linux.go new file mode 100644 index 000000000..f52669a7f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/fingerprint_linux.go @@ -0,0 +1,5 @@ +package fingerprint + +func initPlatformFingerprints(fps map[string]Factory) { + fps["cgroup"] = NewCGroupFingerprint +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/host.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/host.go new file mode 100644 index 000000000..a40a473f6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/host.go @@ -0,0 +1,51 @@ +package fingerprint + +import ( + "fmt" + "log" + "os/exec" + "runtime" + "strings" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/shirou/gopsutil/host" +) + +// HostFingerprint is used to fingerprint the host +type HostFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewHostFingerprint is used to create a Host fingerprint +func NewHostFingerprint(logger *log.Logger) Fingerprint { + f := &HostFingerprint{logger: logger} + return f +} + +func (f *HostFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + hostInfo, err := host.Info() + if err != nil { + f.logger.Println("[WARN] Error retrieving host information: ", err) + return false, err + } + + node.Attributes["os.name"] = hostInfo.Platform + node.Attributes["os.version"] = hostInfo.PlatformVersion + + node.Attributes["kernel.name"] = runtime.GOOS + node.Attributes["kernel.version"] = "" + + if runtime.GOOS != "windows" { + out, err := exec.Command("uname", "-r").Output() + if err != nil { + return false, fmt.Errorf("Failed to run uname: %s", err) + } + node.Attributes["kernel.version"] = strings.Trim(string(out), "\n") + } + + node.Attributes["unique.hostname"] = hostInfo.Hostname + + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/memory.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/memory.go new file mode 100644 index 000000000..b249bebf5 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/memory.go @@ -0,0 +1,43 @@ +package fingerprint + +import ( + "fmt" + "log" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" + "github.com/shirou/gopsutil/mem" +) + +// MemoryFingerprint is used to fingerprint the available memory on the node +type MemoryFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewMemoryFingerprint is used to create a Memory fingerprint +func NewMemoryFingerprint(logger *log.Logger) Fingerprint { + f := &MemoryFingerprint{ + logger: logger, + } + return f +} + +func (f *MemoryFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + memInfo, 
err := mem.VirtualMemory() + if err != nil { + f.logger.Printf("[WARN] Error reading memory information: %s", err) + return false, err + } + + if memInfo.Total > 0 { + node.Attributes["memory.totalbytes"] = fmt.Sprintf("%d", memInfo.Total) + + if node.Resources == nil { + node.Resources = &structs.Resources{} + } + node.Resources.MemoryMB = int(memInfo.Total / 1024 / 1024) + } + + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/network.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/network.go new file mode 100644 index 000000000..9d4e8683a --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/network.go @@ -0,0 +1,167 @@ +package fingerprint + +import ( + "errors" + "fmt" + "log" + "net" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// NetworkFingerprint is used to fingerprint the Network capabilities of a node +type NetworkFingerprint struct { + StaticFingerprinter + logger *log.Logger + interfaceDetector NetworkInterfaceDetector +} + +// An interface to isolate calls to various APIs in the net package. +// This facilitates testing, where we can implement +// fake interfaces and addresses to test various code paths +type NetworkInterfaceDetector interface { + Interfaces() ([]net.Interface, error) + InterfaceByName(name string) (*net.Interface, error) + Addrs(intf *net.Interface) ([]net.Addr, error) +} + +// Implements the interface detector which calls net directly +type DefaultNetworkInterfaceDetector struct { +} + +func (b *DefaultNetworkInterfaceDetector) Interfaces() ([]net.Interface, error) { + return net.Interfaces() } + +func (b *DefaultNetworkInterfaceDetector) InterfaceByName(name string) (*net.Interface, error) { + return net.InterfaceByName(name) +} + +func (b *DefaultNetworkInterfaceDetector) Addrs(intf *net.Interface) ([]net.Addr, error) { + return intf.Addrs() +} + +// NewNetworkFingerprint returns a new NetworkFingerprinter with the given +// logger +func NewNetworkFingerprint(logger *log.Logger) Fingerprint { + f := &NetworkFingerprint{logger: logger, interfaceDetector: &DefaultNetworkInterfaceDetector{}} + return f +} + +func (f *NetworkFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + // newNetwork is populated and added to the Node's resources + newNetwork := &structs.NetworkResource{} + var ip string + + intf, err := f.findInterface(cfg.NetworkInterface) + switch { + case err != nil: + return false, fmt.Errorf("Error while detecting network interface during fingerprinting: %v", err) + case intf == nil: + // No interface could be found + return false, nil + } + + if ip, err = f.ipAddress(intf); err != nil { + return false, fmt.Errorf("Unable to find IP address of interface: %s, err: %v", intf.Name, err) + } + + newNetwork.Device = intf.Name + node.Attributes["unique.network.ip-address"] = ip + newNetwork.IP = ip + newNetwork.CIDR = newNetwork.IP + "/32" + + f.logger.Printf("[DEBUG] fingerprint.network: Detected interface %v with IP %v during fingerprinting", intf.Name, ip) + + if throughput := f.linkSpeed(intf.Name); throughput > 0 { + newNetwork.MBits = throughput + f.logger.Printf("[DEBUG] fingerprint.network: link speed for %v set to %v", intf.Name, newNetwork.MBits) + } else { + f.logger.Printf("[DEBUG] fingerprint.network: Unable to read link speed; setting to default %v", cfg.NetworkSpeed) + newNetwork.MBits = cfg.NetworkSpeed + } + + if node.Resources == nil { + node.Resources = &structs.Resources{} + } + + 
node.Resources.Networks = append(node.Resources.Networks, newNetwork) + + // return true, because we have a network connection + return true, nil +} + +// Gets the ipv4 addr for a network interface +func (f *NetworkFingerprint) ipAddress(intf *net.Interface) (string, error) { + var addrs []net.Addr + var err error + + if addrs, err = f.interfaceDetector.Addrs(intf); err != nil { + return "", err + } + + if len(addrs) == 0 { + return "", errors.New(fmt.Sprintf("Interface %s has no IP address", intf.Name)) + } + for _, addr := range addrs { + var ip net.IP + switch v := (addr).(type) { + case *net.IPNet: + ip = v.IP + case *net.IPAddr: + ip = v.IP + } + if ip.To4() != nil { + return ip.String(), nil + } + } + + return "", fmt.Errorf("Couldn't parse IP address for interface %s", intf.Name) + +} + +// Checks if the device is marked UP by the operator +func (f *NetworkFingerprint) isDeviceEnabled(intf *net.Interface) bool { + return intf.Flags&net.FlagUp != 0 +} + +// Checks if the device has any IP address configured +func (f *NetworkFingerprint) deviceHasIpAddress(intf *net.Interface) bool { + _, err := f.ipAddress(intf) + return err == nil +} + +func (n *NetworkFingerprint) isDeviceLoopBackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +// Returns the interface with the name passed by user +// If the name is blank then it iterates through all the devices +// and finds one which is routable and marked as UP +// It excludes PPP and lo devices unless they are specifically asked +func (f *NetworkFingerprint) findInterface(deviceName string) (*net.Interface, error) { + var interfaces []net.Interface + var err error + + if deviceName != "" { + return f.interfaceDetector.InterfaceByName(deviceName) + } + + var intfs []net.Interface + + if intfs, err = f.interfaceDetector.Interfaces(); err != nil { + return nil, err + } + + for _, intf := range intfs { + if f.isDeviceEnabled(&intf) && !f.isDeviceLoopBackOrPointToPoint(&intf) && f.deviceHasIpAddress(&intf) { + interfaces = append(interfaces, intf) + } + } + + if len(interfaces) == 0 { + return nil, nil + } + return &interfaces[0], nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/network_default.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_default.go new file mode 100644 index 000000000..3faa93881 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_default.go @@ -0,0 +1,8 @@ +// +build !linux,!windows + +package fingerprint + +// linkSpeed returns the default link speed +func (f *NetworkFingerprint) linkSpeed(device string) int { + return 0 +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/network_linux.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_linux.go new file mode 100644 index 000000000..5a1ba001f --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_linux.go @@ -0,0 +1,78 @@ +package fingerprint + +import ( + "fmt" + "io/ioutil" + "os/exec" + "regexp" + "strconv" + "strings" +) + +// linkSpeedSys parses link speed in Mb/s from /sys. 
+func (f *NetworkFingerprint) linkSpeedSys(device string) int { + path := fmt.Sprintf("/sys/class/net/%s/speed", device) + + // Read contents of the device/speed file + content, err := ioutil.ReadFile(path) + if err != nil { + f.logger.Printf("[DEBUG] fingerprint.network: Unable to read link speed from %s", path) + return 0 + } + + lines := strings.Split(string(content), "\n") + mbs, err := strconv.Atoi(lines[0]) + if err != nil || mbs <= 0 { + f.logger.Printf("[DEBUG] fingerprint.network: Unable to parse link speed from %s", path) + return 0 + } + + return mbs +} + +// linkSpeed returns link speed in Mb/s, or 0 when unable to determine it. +func (f *NetworkFingerprint) linkSpeed(device string) int { + // Use LookPath to find the ethtool in the systems $PATH + // If it's not found or otherwise errors, LookPath returns and empty string + // and an error we can ignore for our purposes + ethtoolPath, _ := exec.LookPath("ethtool") + if ethtoolPath != "" { + if speed := f.linkSpeedEthtool(ethtoolPath, device); speed > 0 { + return speed + } + } + + // Fall back on checking a system file for link speed. + return f.linkSpeedSys(device) +} + +// linkSpeedEthtool determines link speed in Mb/s with 'ethtool'. +func (f *NetworkFingerprint) linkSpeedEthtool(path, device string) int { + outBytes, err := exec.Command(path, device).Output() + if err != nil { + f.logger.Printf("[WARN] fingerprint.network: Error calling ethtool (%s %s): %v", path, device, err) + return 0 + } + + output := strings.TrimSpace(string(outBytes)) + re := regexp.MustCompile("Speed: [0-9]+[a-zA-Z]+/s") + m := re.FindString(output) + if m == "" { + // no matches found, output may be in a different format + f.logger.Printf("[WARN] fingerprint.network: Unable to parse Speed in output of '%s %s'", path, device) + return 0 + } + + // Split and trim the Mb/s unit from the string output + args := strings.Split(m, ": ") + raw := strings.TrimSuffix(args[1], "Mb/s") + + // convert to Mb/s + mbs, err := strconv.Atoi(raw) + if err != nil || mbs <= 0 { + f.logger.Printf("[WARN] fingerprint.network: Unable to parse Mb/s in output of '%s %s'", path, device) + return 0 + } + + return mbs +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/network_windows.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_windows.go new file mode 100644 index 000000000..8617a65a8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/network_windows.go @@ -0,0 +1,52 @@ +package fingerprint + +import ( + "fmt" + "os/exec" + "strconv" + "strings" +) + +// linkSpeed returns link speed in Mb/s, or 0 when unable to determine it. 
+func (f *NetworkFingerprint) linkSpeed(device string) int { + command := fmt.Sprintf("Get-NetAdapter -IncludeHidden | Where name -eq '%s' | Select -ExpandProperty LinkSpeed", device) + path := "powershell.exe" + outBytes, err := exec.Command(path, command).Output() + + if err != nil { + f.logger.Printf("[WARN] fingerprint.network: Error calling %s (%s): %v", path, command, err) + return 0 + } + + output := strings.TrimSpace(string(outBytes)) + + return f.parseLinkSpeed(output) +} + +func (f *NetworkFingerprint) parseLinkSpeed(commandOutput string) int { + args := strings.Split(commandOutput, " ") + if len(args) != 2 { + f.logger.Printf("[WARN] fingerprint.network: Couldn't split LinkSpeed (%s)", commandOutput) + return 0 + } + + unit := strings.Replace(args[1], "\r\n", "", -1) + value, err := strconv.Atoi(args[0]) + if err != nil { + f.logger.Printf("[WARN] fingerprint.network: Unable to parse LinkSpeed value (%s)", commandOutput) + return 0 + } + + switch unit { + case "Mbps": + return value + case "Kbps": + return value / 1000 + case "Gbps": + return value * 1000 + case "bps": + return value / 1000000 + } + + return 0 +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/nomad.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/nomad.go new file mode 100644 index 000000000..5ac78d091 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/nomad.go @@ -0,0 +1,26 @@ +package fingerprint + +import ( + "log" + + client "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +// NomadFingerprint is used to fingerprint the Nomad version +type NomadFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +// NewNomadFingerprint is used to create a Nomad fingerprint +func NewNomadFingerprint(logger *log.Logger) Fingerprint { + f := &NomadFingerprint{logger: logger} + return f +} + +func (f *NomadFingerprint) Fingerprint(config *client.Config, node *structs.Node) (bool, error) { + node.Attributes["nomad.version"] = config.Version + node.Attributes["nomad.revision"] = config.Revision + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/storage.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage.go new file mode 100644 index 000000000..c60f13154 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage.go @@ -0,0 +1,59 @@ +package fingerprint + +import ( + "fmt" + "log" + "os" + "strconv" + + "github.com/hashicorp/nomad/client/config" + "github.com/hashicorp/nomad/nomad/structs" +) + +const bytesPerMegabyte = 1024 * 1024 + +// StorageFingerprint is used to measure the amount of storage free for +// applications that the Nomad agent will run on this machine. 
+type StorageFingerprint struct { + StaticFingerprinter + logger *log.Logger +} + +func NewStorageFingerprint(logger *log.Logger) Fingerprint { + fp := &StorageFingerprint{logger: logger} + return fp +} + +func (f *StorageFingerprint) Fingerprint(cfg *config.Config, node *structs.Node) (bool, error) { + + // Initialize these to empty defaults + node.Attributes["unique.storage.volume"] = "" + node.Attributes["unique.storage.bytestotal"] = "" + node.Attributes["unique.storage.bytesfree"] = "" + if node.Resources == nil { + node.Resources = &structs.Resources{} + } + + // Guard against unset AllocDir + storageDir := cfg.AllocDir + if storageDir == "" { + var err error + storageDir, err = os.Getwd() + if err != nil { + return false, fmt.Errorf("unable to get CWD from filesystem: %s", err) + } + } + + volume, total, free, err := f.diskFree(storageDir) + if err != nil { + return false, fmt.Errorf("failed to determine disk space for %s: %v", storageDir, err) + } + + node.Attributes["unique.storage.volume"] = volume + node.Attributes["unique.storage.bytestotal"] = strconv.FormatUint(total, 10) + node.Attributes["unique.storage.bytesfree"] = strconv.FormatUint(free, 10) + + node.Resources.DiskMB = int(free / bytesPerMegabyte) + + return true, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_unix.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_unix.go new file mode 100644 index 000000000..ada8f4bea --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_unix.go @@ -0,0 +1,64 @@ +// +build darwin dragonfly freebsd linux netbsd openbsd solaris + +package fingerprint + +import ( + "fmt" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" +) + +// diskFree inspects the filesystem for path and returns the volume name and +// the total and free bytes available on the file system. 
+func (f *StorageFingerprint) diskFree(path string) (volume string, total, free uint64, err error) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to determine absolute path for %s", path) + } + + // Use -k to standardize the output values between darwin and linux + var dfArgs string + if runtime.GOOS == "linux" { + // df on linux needs the -P option to prevent linebreaks on long filesystem paths + dfArgs = "-kP" + } else { + dfArgs = "-k" + } + + mountOutput, err := exec.Command("df", dfArgs, absPath).Output() + if err != nil { + return "", 0, 0, fmt.Errorf("failed to determine mount point for %s", absPath) + } + // Output looks something like: + // Filesystem 1024-blocks Used Available Capacity iused ifree %iused Mounted on + // /dev/disk1 487385240 423722532 63406708 87% 105994631 15851677 87% / + // [0] volume [1] capacity [2] SKIP [3] free + lines := strings.Split(string(mountOutput), "\n") + if len(lines) < 2 { + return "", 0, 0, fmt.Errorf("failed to parse `df` output; expected at least 2 lines") + } + fields := strings.Fields(lines[1]) + if len(fields) < 4 { + return "", 0, 0, fmt.Errorf("failed to parse `df` output; expected at least 4 columns") + } + volume = fields[0] + + total, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to parse storage.bytestotal size in kilobytes") + } + // convert to bytes + total *= 1024 + + free, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to parse storage.bytesfree size in kilobytes") + } + // convert to bytes + free *= 1024 + + return volume, total, free, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_windows.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_windows.go new file mode 100644 index 000000000..716f1baf0 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/storage_windows.go @@ -0,0 +1,33 @@ +package fingerprint + +import ( + "fmt" + "path/filepath" + "syscall" +) + +//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zstorage_windows.go storage_windows.go + +//sys getDiskFreeSpaceEx(dirName *uint16, availableFreeBytes *uint64, totalBytes *uint64, totalFreeBytes *uint64) (err error) = kernel32.GetDiskFreeSpaceExW + +// diskFree inspects the filesystem for path and returns the volume name and +// the total and free bytes available on the file system. 
+func (f *StorageFingerprint) diskFree(path string) (volume string, total, free uint64, err error) { + absPath, err := filepath.Abs(path) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to determine absolute path for %s", path) + } + + volume = filepath.VolumeName(absPath) + + absPathp, err := syscall.UTF16PtrFromString(absPath) + if err != nil { + return "", 0, 0, fmt.Errorf("failed to convert \"%s\" to UTF16: %v", absPath, err) + } + + if err := getDiskFreeSpaceEx(absPathp, nil, &total, &free); err != nil { + return "", 0, 0, fmt.Errorf("failed to get free disk space for %s: %v", absPath, err) + } + + return volume, total, free, nil +} diff --git a/vendor/github.com/hashicorp/nomad/client/fingerprint/zstorage_windows.go b/vendor/github.com/hashicorp/nomad/client/fingerprint/zstorage_windows.go new file mode 100644 index 000000000..52405b3f2 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/fingerprint/zstorage_windows.go @@ -0,0 +1,26 @@ +// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT + +package fingerprint + +import "unsafe" +import "syscall" + +var _ unsafe.Pointer + +var ( + modkernel32 = syscall.NewLazyDLL("kernel32.dll") + + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") +) + +func getDiskFreeSpaceEx(dirName *uint16, availableFreeBytes *uint64, totalBytes *uint64, totalFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(dirName)), uintptr(unsafe.Pointer(availableFreeBytes)), uintptr(unsafe.Pointer(totalBytes)), uintptr(unsafe.Pointer(totalFreeBytes)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = error(e1) + } else { + err = syscall.EINVAL + } + } + return +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/cpu.go b/vendor/github.com/hashicorp/nomad/client/stats/cpu.go new file mode 100644 index 000000000..3e98048e9 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/cpu.go @@ -0,0 +1,62 @@ +package stats + +import ( + "runtime" + "time" + + shelpers "github.com/hashicorp/nomad/helper/stats" +) + +// CpuStats calculates cpu usage percentage +type CpuStats struct { + prevCpuTime float64 + prevTime time.Time + clkSpeed float64 + + totalCpus int +} + +// NewCpuStats returns a cpu stats calculator +func NewCpuStats() *CpuStats { + numCpus := runtime.NumCPU() + cpuStats := &CpuStats{ + totalCpus: numCpus, + } + return cpuStats +} + +// Percent calculates the cpu usage percentage based on the current cpu usage +// and the previous cpu usage where usage is given as time in nanoseconds spend +// in the cpu +func (c *CpuStats) Percent(cpuTime float64) float64 { + now := time.Now() + + if c.prevCpuTime == 0.0 { + // invoked first time + c.prevCpuTime = cpuTime + c.prevTime = now + return 0.0 + } + + timeDelta := now.Sub(c.prevTime).Nanoseconds() + ret := c.calculatePercent(c.prevCpuTime, cpuTime, timeDelta) + c.prevCpuTime = cpuTime + c.prevTime = now + return ret +} + +// TicksConsumed calculates the total ticks consumes by the process across all +// cpu cores +func (c *CpuStats) TicksConsumed(percent float64) float64 { + return (percent / 100) * shelpers.TotalTicksAvailable() / float64(c.totalCpus) +} + +func (c *CpuStats) calculatePercent(t1, t2 float64, timeDelta int64) float64 { + vDelta := t2 - t1 + if timeDelta <= 0 || vDelta <= 0.0 { + return 0.0 + } + + overall_percent := (vDelta / float64(timeDelta)) * 100.0 + return overall_percent +} diff --git a/vendor/github.com/hashicorp/nomad/client/stats/host.go 
b/vendor/github.com/hashicorp/nomad/client/stats/host.go new file mode 100644 index 000000000..bf4a6b149 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/stats/host.go @@ -0,0 +1,187 @@ +package stats + +import ( + "math" + "runtime" + "time" + + "github.com/shirou/gopsutil/cpu" + "github.com/shirou/gopsutil/disk" + "github.com/shirou/gopsutil/host" + "github.com/shirou/gopsutil/mem" + + shelpers "github.com/hashicorp/nomad/helper/stats" +) + +// HostStats represents resource usage stats of the host running a Nomad client +type HostStats struct { + Memory *MemoryStats + CPU []*CPUStats + DiskStats []*DiskStats + Uptime uint64 + Timestamp int64 + CPUTicksConsumed float64 +} + +// MemoryStats represnts stats related to virtual memory usage +type MemoryStats struct { + Total uint64 + Available uint64 + Used uint64 + Free uint64 +} + +// CPUStats represents stats related to cpu usage +type CPUStats struct { + CPU string + User float64 + System float64 + Idle float64 + Total float64 +} + +// DiskStats represents stats related to disk usage +type DiskStats struct { + Device string + Mountpoint string + Size uint64 + Used uint64 + Available uint64 + UsedPercent float64 + InodesUsedPercent float64 +} + +// HostStatsCollector collects host resource usage stats +type HostStatsCollector struct { + clkSpeed float64 + numCores int + statsCalculator map[string]*HostCpuStatsCalculator +} + +// NewHostStatsCollector returns a HostStatsCollector +func NewHostStatsCollector() *HostStatsCollector { + numCores := runtime.NumCPU() + statsCalculator := make(map[string]*HostCpuStatsCalculator) + collector := &HostStatsCollector{ + statsCalculator: statsCalculator, + numCores: numCores, + } + return collector +} + +// Collect collects stats related to resource usage of a host +func (h *HostStatsCollector) Collect() (*HostStats, error) { + hs := &HostStats{Timestamp: time.Now().UTC().UnixNano()} + memStats, err := mem.VirtualMemory() + if err != nil { + return nil, err + } + hs.Memory = &MemoryStats{ + Total: memStats.Total, + Available: memStats.Available, + Used: memStats.Used, + Free: memStats.Free, + } + + ticksConsumed := 0.0 + cpuStats, err := cpu.Times(true) + if err != nil { + return nil, err + } + cs := make([]*CPUStats, len(cpuStats)) + for idx, cpuStat := range cpuStats { + percentCalculator, ok := h.statsCalculator[cpuStat.CPU] + if !ok { + percentCalculator = NewHostCpuStatsCalculator() + h.statsCalculator[cpuStat.CPU] = percentCalculator + } + idle, user, system, total := percentCalculator.Calculate(cpuStat) + cs[idx] = &CPUStats{ + CPU: cpuStat.CPU, + User: user, + System: system, + Idle: idle, + Total: total, + } + ticksConsumed += (total / 100) * (shelpers.TotalTicksAvailable() / float64(len(cpuStats))) + } + hs.CPU = cs + hs.CPUTicksConsumed = ticksConsumed + + partitions, err := disk.Partitions(false) + if err != nil { + return nil, err + } + var diskStats []*DiskStats + for _, partition := range partitions { + usage, err := disk.Usage(partition.Mountpoint) + if err != nil { + return nil, err + } + ds := DiskStats{ + Device: partition.Device, + Mountpoint: partition.Mountpoint, + Size: usage.Total, + Used: usage.Used, + Available: usage.Free, + UsedPercent: usage.UsedPercent, + InodesUsedPercent: usage.InodesUsedPercent, + } + if math.IsNaN(ds.UsedPercent) { + ds.UsedPercent = 0.0 + } + if math.IsNaN(ds.InodesUsedPercent) { + ds.InodesUsedPercent = 0.0 + } + diskStats = append(diskStats, &ds) + } + hs.DiskStats = diskStats + + uptime, err := host.Uptime() + if err != nil { + return 
nil, err + } + hs.Uptime = uptime + + return hs, nil +} + +// HostCpuStatsCalculator calculates cpu usage percentages +type HostCpuStatsCalculator struct { + prevIdle float64 + prevUser float64 + prevSystem float64 + prevBusy float64 + prevTotal float64 +} + +// NewHostCpuStatsCalculator returns a HostCpuStatsCalculator +func NewHostCpuStatsCalculator() *HostCpuStatsCalculator { + return &HostCpuStatsCalculator{} +} + +// Calculate calculates the current cpu usage percentages +func (h *HostCpuStatsCalculator) Calculate(times cpu.TimesStat) (idle float64, user float64, system float64, total float64) { + currentIdle := times.Idle + currentUser := times.User + currentSystem := times.System + currentTotal := times.Total() + + deltaTotal := currentTotal - h.prevTotal + idle = ((currentIdle - h.prevIdle) / deltaTotal) * 100 + user = ((currentUser - h.prevUser) / deltaTotal) * 100 + system = ((currentSystem - h.prevSystem) / deltaTotal) * 100 + + currentBusy := times.User + times.System + times.Nice + times.Iowait + times.Irq + + times.Softirq + times.Steal + times.Guest + times.GuestNice + times.Stolen + + total = ((currentBusy - h.prevBusy) / deltaTotal) * 100 + + h.prevIdle = currentIdle + h.prevUser = currentUser + h.prevSystem = currentSystem + h.prevTotal = currentTotal + h.prevBusy = currentBusy + + return +} diff --git a/vendor/github.com/hashicorp/nomad/client/structs/structs.go b/vendor/github.com/hashicorp/nomad/client/structs/structs.go new file mode 100644 index 000000000..724084dc8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/client/structs/structs.go @@ -0,0 +1,97 @@ +package structs + +// MemoryStats holds memory usage related stats +type MemoryStats struct { + RSS uint64 + Cache uint64 + Swap uint64 + MaxUsage uint64 + KernelUsage uint64 + KernelMaxUsage uint64 + + // A list of fields whose values were actually sampled + Measured []string +} + +func (ms *MemoryStats) Add(other *MemoryStats) { + ms.RSS += other.RSS + ms.Cache += other.Cache + ms.Swap += other.Swap + ms.MaxUsage += other.MaxUsage + ms.KernelUsage += other.KernelUsage + ms.KernelMaxUsage += other.KernelMaxUsage + ms.Measured = joinStringSet(ms.Measured, other.Measured) +} + +// CpuStats holds cpu usage related stats +type CpuStats struct { + SystemMode float64 + UserMode float64 + TotalTicks float64 + ThrottledPeriods uint64 + ThrottledTime uint64 + Percent float64 + + // A list of fields whose values were actually sampled + Measured []string +} + +func (cs *CpuStats) Add(other *CpuStats) { + cs.SystemMode += other.SystemMode + cs.UserMode += other.UserMode + cs.TotalTicks += other.TotalTicks + cs.ThrottledPeriods += other.ThrottledPeriods + cs.ThrottledTime += other.ThrottledTime + cs.Percent += other.Percent + cs.Measured = joinStringSet(cs.Measured, other.Measured) +} + +// ResourceUsage holds information related to cpu and memory stats +type ResourceUsage struct { + MemoryStats *MemoryStats + CpuStats *CpuStats +} + +func (ru *ResourceUsage) Add(other *ResourceUsage) { + ru.MemoryStats.Add(other.MemoryStats) + ru.CpuStats.Add(other.CpuStats) +} + +// TaskResourceUsage holds aggregated resource usage of all processes in a Task +// and the resource usage of the individual pids +type TaskResourceUsage struct { + ResourceUsage *ResourceUsage + Timestamp int64 + Pids map[string]*ResourceUsage +} + +// AllocResourceUsage holds the aggregated task resource usage of the +// allocation. 
+type AllocResourceUsage struct { + // ResourceUsage is the summation of the task resources + ResourceUsage *ResourceUsage + + // Tasks contains the resource usage of each task + Tasks map[string]*TaskResourceUsage + + // The max timestamp of all the Tasks + Timestamp int64 +} + +// joinStringSet takes two slices of strings and joins them +func joinStringSet(s1, s2 []string) []string { + lookup := make(map[string]struct{}, len(s1)) + j := make([]string, 0, len(s1)) + for _, s := range s1 { + j = append(j, s) + lookup[s] = struct{}{} + } + + for _, s := range s2 { + if _, ok := lookup[s]; !ok { + j = append(j, s) + } + } + + return j +} diff --git a/vendor/github.com/hashicorp/nomad/command/agent/consul/check.go b/vendor/github.com/hashicorp/nomad/command/agent/consul/check.go new file mode 100644 index 000000000..28df291f6 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/command/agent/consul/check.go @@ -0,0 +1,84 @@ +package consul + +import ( + "log" + "sync" + "time" + + "github.com/hashicorp/consul/lib" + cstructs "github.com/hashicorp/nomad/client/driver/structs" +) + +// CheckRunner runs a given check in a specific interval and update a +// corresponding Consul TTL check +type CheckRunner struct { + check Check + runCheck func(Check) + logger *log.Logger + stop bool + stopCh chan struct{} + stopLock sync.Mutex + + started bool + startedLock sync.Mutex +} + +// NewCheckRunner configures and returns a CheckRunner +func NewCheckRunner(check Check, runCheck func(Check), logger *log.Logger) *CheckRunner { + cr := CheckRunner{ + check: check, + runCheck: runCheck, + logger: logger, + stopCh: make(chan struct{}), + } + return &cr +} + +// Start is used to start the check. The check runs until stop is called +func (r *CheckRunner) Start() { + r.startedLock.Lock() + defer r.startedLock.Unlock() + if r.started { + return + } + r.stopLock.Lock() + defer r.stopLock.Unlock() + go r.run() + r.started = true +} + +// Stop is used to stop the check. +func (r *CheckRunner) Stop() { + r.stopLock.Lock() + defer r.stopLock.Unlock() + if !r.stop { + r.stop = true + close(r.stopCh) + } +} + +// run is invoked by a goroutine to run until Stop() is called +func (r *CheckRunner) run() { + // Get the randomized initial pause time + initialPauseTime := lib.RandomStagger(r.check.Interval()) + r.logger.Printf("[DEBUG] agent: pausing %v before first invocation of %s", initialPauseTime, r.check.ID()) + next := time.NewTimer(initialPauseTime) + for { + select { + case <-next.C: + r.runCheck(r.check) + next.Reset(r.check.Interval()) + case <-r.stopCh: + next.Stop() + return + } + } +} + +// Check is an interface which check providers can implement for Nomad to run +type Check interface { + Run() *cstructs.CheckResult + ID() string + Interval() time.Duration + Timeout() time.Duration +} diff --git a/vendor/github.com/hashicorp/nomad/command/agent/consul/syncer.go b/vendor/github.com/hashicorp/nomad/command/agent/consul/syncer.go new file mode 100644 index 000000000..736e15c8e --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/command/agent/consul/syncer.go @@ -0,0 +1,983 @@ +// Package consul is used by Nomad to register all services both static services +// and dynamic via allocations. +// +// Consul Service IDs have the following format: ${nomadServicePrefix}-${groupName}-${serviceKey} +// groupName takes on one of the following values: +// - server +// - client +// - executor-${alloc-id}-${task-name} +// +// serviceKey should be generated by service registrators. 
+// If the serviceKey is being generated by the executor for a Nomad Task.Services +// the following helper should be used: +// NOTE: Executor should interpolate the service prior to calling +// func GenerateTaskServiceKey(service *structs.Service) string +// +// The Nomad Client reaps services registered from dead allocations that were +// not properly cleaned up by the executor (this is not the expected case). +// +// TODO fix this comment +// The Consul ServiceIDs generated by the executor will contain the allocation +// ID. Thus the client can generate the list of Consul ServiceIDs to keep by +// calling the following method on all running allocations the client is aware +// of: +// func GenerateExecutorServiceKeyPrefixFromAlloc(allocID string) string +package consul + +import ( + "fmt" + "log" + "net" + "net/url" + "strconv" + "strings" + "sync" + "time" + + consul "github.com/hashicorp/consul/api" + "github.com/hashicorp/consul/lib" + "github.com/hashicorp/go-multierror" + + "github.com/hashicorp/nomad/nomad/structs" + "github.com/hashicorp/nomad/nomad/structs/config" + "github.com/hashicorp/nomad/nomad/types" +) + +const ( + // initialSyncBuffer is the max time an initial sync will sleep + // before syncing. + initialSyncBuffer = 30 * time.Second + + // initialSyncDelay is the delay before an initial sync. + initialSyncDelay = 5 * time.Second + + // nomadServicePrefix is the first prefix that scopes all Nomad registered + // services + nomadServicePrefix = "_nomad" + + // The periodic time interval for syncing services and checks with Consul + syncInterval = 5 * time.Second + + // syncJitter provides a little variance in the frequency at which + // Syncer polls Consul. + syncJitter = 8 + + // ttlCheckBuffer is the time interval that Nomad can take to report Consul + // the check result + ttlCheckBuffer = 31 * time.Second + + // DefaultQueryWaitDuration is the max duration the Consul Agent will + // spend waiting for a response from a Consul Query. + DefaultQueryWaitDuration = 2 * time.Second + + // ServiceTagHTTP is the tag assigned to HTTP services + ServiceTagHTTP = "http" + + // ServiceTagRPC is the tag assigned to RPC services + ServiceTagRPC = "rpc" + + // ServiceTagSerf is the tag assigned to Serf services + ServiceTagSerf = "serf" +) + +// consulServiceID and consulCheckID are the IDs registered with Consul +type consulServiceID string +type consulCheckID string + +// ServiceKey is the generated service key that is used to build the Consul +// ServiceID +type ServiceKey string + +// ServiceDomain is the domain of services registered by Nomad +type ServiceDomain string + +const ( + ClientDomain ServiceDomain = "client" + ServerDomain ServiceDomain = "server" +) + +// NewExecutorDomain returns a domain specific to the alloc ID and task +func NewExecutorDomain(allocID, task string) ServiceDomain { + return ServiceDomain(fmt.Sprintf("executor-%s-%s", allocID, task)) +} + +// Syncer allows syncing of services and checks with Consul +type Syncer struct { + client *consul.Client + consulAvailable bool + + // servicesGroups and checkGroups are named groups of services and checks + // respectively that will be flattened and reconciled with Consul when + // SyncServices() is called. The key to the servicesGroups map is unique + // per handler and is used to allow the Agent's services to be maintained + // independently of the Client or Server's services. 
+ servicesGroups map[ServiceDomain]map[ServiceKey]*consul.AgentServiceRegistration + checkGroups map[ServiceDomain]map[ServiceKey][]*consul.AgentCheckRegistration + groupsLock sync.RWMutex + + // The "Consul Registry" is a collection of Consul Services and + // Checks all guarded by the registryLock. + registryLock sync.RWMutex + + // trackedChecks and trackedServices are registered with consul + trackedChecks map[consulCheckID]*consul.AgentCheckRegistration + trackedServices map[consulServiceID]*consul.AgentServiceRegistration + + // checkRunners are delegated Consul checks being ran by the Syncer + checkRunners map[consulCheckID]*CheckRunner + + addrFinder func(portLabel string) (string, int) + createDelegatedCheck func(*structs.ServiceCheck, string) (Check, error) + delegateChecks map[string]struct{} // delegateChecks are the checks that the Nomad client runs and reports to Consul + // End registryLock guarded attributes. + + logger *log.Logger + + shutdownCh chan struct{} + shutdown bool + shutdownLock sync.Mutex + + // notifyShutdownCh is used to notify a Syncer it needs to shutdown. + // This can happen because there was an explicit call to the Syncer's + // Shutdown() method, or because the calling task signaled the + // program is going to exit by closing its shutdownCh. + notifyShutdownCh chan struct{} + + // periodicCallbacks is walked sequentially when the timer in Run + // fires. + periodicCallbacks map[string]types.PeriodicCallback + notifySyncCh chan struct{} + periodicLock sync.RWMutex +} + +// NewSyncer returns a new consul.Syncer +func NewSyncer(consulConfig *config.ConsulConfig, shutdownCh chan struct{}, logger *log.Logger) (*Syncer, error) { + var consulClientConfig *consul.Config + var err error + consulClientConfig, err = consulConfig.ApiConfig() + if err != nil { + return nil, err + } + + var consulClient *consul.Client + if consulClient, err = consul.NewClient(consulClientConfig); err != nil { + return nil, err + } + consulSyncer := Syncer{ + client: consulClient, + logger: logger, + consulAvailable: true, + shutdownCh: shutdownCh, + servicesGroups: make(map[ServiceDomain]map[ServiceKey]*consul.AgentServiceRegistration), + checkGroups: make(map[ServiceDomain]map[ServiceKey][]*consul.AgentCheckRegistration), + trackedServices: make(map[consulServiceID]*consul.AgentServiceRegistration), + trackedChecks: make(map[consulCheckID]*consul.AgentCheckRegistration), + checkRunners: make(map[consulCheckID]*CheckRunner), + periodicCallbacks: make(map[string]types.PeriodicCallback), + } + + return &consulSyncer, nil +} + +// SetDelegatedChecks sets the checks that nomad is going to run and report the +// result back to consul +func (c *Syncer) SetDelegatedChecks(delegateChecks map[string]struct{}, createDelegatedCheckFn func(*structs.ServiceCheck, string) (Check, error)) *Syncer { + c.delegateChecks = delegateChecks + c.createDelegatedCheck = createDelegatedCheckFn + return c +} + +// SetAddrFinder sets a function to find the host and port for a Service given its port label +func (c *Syncer) SetAddrFinder(addrFinder func(string) (string, int)) *Syncer { + c.addrFinder = addrFinder + return c +} + +// GenerateServiceKey should be called to generate a serviceKey based on the +// Service. 
+func GenerateServiceKey(service *structs.Service) ServiceKey { + var key string + numTags := len(service.Tags) + switch numTags { + case 0: + key = fmt.Sprintf("%s", service.Name) + default: + tags := strings.Join(service.Tags, "-") + key = fmt.Sprintf("%s-%s", service.Name, tags) + } + return ServiceKey(key) +} + +// SetServices stores the map of Nomad Services to the provided service +// domain name. +func (c *Syncer) SetServices(domain ServiceDomain, services map[ServiceKey]*structs.Service) error { + var mErr multierror.Error + numServ := len(services) + registeredServices := make(map[ServiceKey]*consul.AgentServiceRegistration, numServ) + registeredChecks := make(map[ServiceKey][]*consul.AgentCheckRegistration, numServ) + for serviceKey, service := range services { + serviceReg, err := c.createService(service, domain, serviceKey) + if err != nil { + mErr.Errors = append(mErr.Errors, err) + continue + } + registeredServices[serviceKey] = serviceReg + + // Register the check(s) for this service + for _, chk := range service.Checks { + // Create a Consul check registration + chkReg, err := c.createCheckReg(chk, serviceReg) + if err != nil { + mErr.Errors = append(mErr.Errors, err) + continue + } + + // creating a nomad check if we have to handle this particular check type + c.registryLock.RLock() + if _, ok := c.delegateChecks[chk.Type]; ok { + _, ok := c.checkRunners[consulCheckID(chkReg.ID)] + c.registryLock.RUnlock() + if ok { + continue + } + + nc, err := c.createDelegatedCheck(chk, chkReg.ID) + if err != nil { + mErr.Errors = append(mErr.Errors, err) + continue + } + + cr := NewCheckRunner(nc, c.runCheck, c.logger) + c.registryLock.Lock() + // TODO type the CheckRunner + c.checkRunners[consulCheckID(nc.ID())] = cr + c.registryLock.Unlock() + } else { + c.registryLock.RUnlock() + } + + registeredChecks[serviceKey] = append(registeredChecks[serviceKey], chkReg) + } + } + + if len(mErr.Errors) > 0 { + return mErr.ErrorOrNil() + } + + c.groupsLock.Lock() + for serviceKey, service := range registeredServices { + serviceKeys, ok := c.servicesGroups[domain] + if !ok { + serviceKeys = make(map[ServiceKey]*consul.AgentServiceRegistration, len(registeredServices)) + c.servicesGroups[domain] = serviceKeys + } + serviceKeys[serviceKey] = service + } + for serviceKey, checks := range registeredChecks { + serviceKeys, ok := c.checkGroups[domain] + if !ok { + serviceKeys = make(map[ServiceKey][]*consul.AgentCheckRegistration, len(registeredChecks)) + c.checkGroups[domain] = serviceKeys + } + serviceKeys[serviceKey] = checks + } + c.groupsLock.Unlock() + + // Sync immediately + c.SyncNow() + + return nil +} + +// SyncNow expires the current timer forcing the list of periodic callbacks +// to be synced immediately. 
+func (c *Syncer) SyncNow() { + select { + case c.notifySyncCh <- struct{}{}: + default: + } +} + +// flattenedServices returns a flattened list of services that are registered +// locally +func (c *Syncer) flattenedServices() []*consul.AgentServiceRegistration { + const initialNumServices = 8 + services := make([]*consul.AgentServiceRegistration, 0, initialNumServices) + c.groupsLock.RLock() + defer c.groupsLock.RUnlock() + for _, servicesGroup := range c.servicesGroups { + for _, service := range servicesGroup { + services = append(services, service) + } + } + return services +} + +// flattenedChecks returns a flattened list of checks that are registered +// locally +func (c *Syncer) flattenedChecks() []*consul.AgentCheckRegistration { + const initialNumChecks = 8 + checks := make([]*consul.AgentCheckRegistration, 0, initialNumChecks) + c.groupsLock.RLock() + for _, checkGroup := range c.checkGroups { + for _, check := range checkGroup { + checks = append(checks, check...) + } + } + c.groupsLock.RUnlock() + return checks +} + +func (c *Syncer) signalShutdown() { + select { + case c.notifyShutdownCh <- struct{}{}: + default: + } +} + +// Shutdown de-registers the services and checks and shuts down periodic syncing +func (c *Syncer) Shutdown() error { + var mErr multierror.Error + + c.shutdownLock.Lock() + if !c.shutdown { + c.shutdown = true + } + c.shutdownLock.Unlock() + + c.signalShutdown() + + // Stop all the checks that nomad is running + c.registryLock.RLock() + defer c.registryLock.RUnlock() + for _, cr := range c.checkRunners { + cr.Stop() + } + + // De-register all the services from Consul + for serviceID := range c.trackedServices { + convertedID := string(serviceID) + if err := c.client.Agent().ServiceDeregister(convertedID); err != nil { + c.logger.Printf("[WARN] consul.syncer: failed to deregister service ID %+q: %v", convertedID, err) + mErr.Errors = append(mErr.Errors, err) + } + } + return mErr.ErrorOrNil() +} + +// queryChecks queries the Consul Agent for a list of Consul checks that +// have been registered with this Consul Syncer. +func (c *Syncer) queryChecks() (map[consulCheckID]*consul.AgentCheck, error) { + checks, err := c.client.Agent().Checks() + if err != nil { + return nil, err + } + return c.filterConsulChecks(checks), nil +} + +// queryAgentServices queries the Consul Agent for a list of Consul services that +// have been registered with this Consul Syncer. +func (c *Syncer) queryAgentServices() (map[consulServiceID]*consul.AgentService, error) { + services, err := c.client.Agent().Services() + if err != nil { + return nil, err + } + return c.filterConsulServices(services), nil +} + +// syncChecks synchronizes this Syncer's Consul Checks with the Consul Agent. +func (c *Syncer) syncChecks() error { + var mErr multierror.Error + consulChecks, err := c.queryChecks() + if err != nil { + return err + } + + // Synchronize checks with Consul + missingChecks, _, changedChecks, staleChecks := c.calcChecksDiff(consulChecks) + for _, check := range missingChecks { + if err := c.registerCheck(check); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + c.registryLock.Lock() + c.trackedChecks[consulCheckID(check.ID)] = check + c.registryLock.Unlock() + } + for _, check := range changedChecks { + // NOTE(sean@): Do we need to deregister the check before + // re-registering it? Not deregistering to avoid missing the + // TTL but doesn't correct reconcile any possible drift with + // the check. 
+ // + // if err := c.deregisterCheck(check.ID); err != nil { + // mErr.Errors = append(mErr.Errors, err) + // } + if err := c.registerCheck(check); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + for _, check := range staleChecks { + if err := c.deregisterCheck(consulCheckID(check.ID)); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + c.registryLock.Lock() + delete(c.trackedChecks, consulCheckID(check.ID)) + c.registryLock.Unlock() + } + return mErr.ErrorOrNil() +} + +// compareConsulCheck takes a consul.AgentCheckRegistration instance and +// compares it with a consul.AgentCheck. Returns true if they are equal +// according to consul.AgentCheck, otherwise false. +func compareConsulCheck(localCheck *consul.AgentCheckRegistration, consulCheck *consul.AgentCheck) bool { + if consulCheck.CheckID != localCheck.ID || + consulCheck.Name != localCheck.Name || + consulCheck.Notes != localCheck.Notes || + consulCheck.ServiceID != localCheck.ServiceID { + return false + } + return true +} + +// calcChecksDiff takes the argument (consulChecks) and calculates the delta +// between the consul.Syncer's list of known checks (c.trackedChecks). Four +// arrays are returned: +// +// 1) a slice of checks that exist only locally in the Syncer and are missing +// from the Consul Agent (consulChecks) and therefore need to be registered. +// +// 2) a slice of checks that exist in both the local consul.Syncer's +// tracked list and Consul Agent (consulChecks). +// +// 3) a slice of checks that exist in both the local consul.Syncer's +// tracked list and Consul Agent (consulChecks) but have diverged state. +// +// 4) a slice of checks that exist only in the Consul Agent (consulChecks) +// and should be removed because the Consul Agent has drifted from the +// Syncer. 
+func (c *Syncer) calcChecksDiff(consulChecks map[consulCheckID]*consul.AgentCheck) ( + missingChecks []*consul.AgentCheckRegistration, + equalChecks []*consul.AgentCheckRegistration, + changedChecks []*consul.AgentCheckRegistration, + staleChecks []*consul.AgentCheckRegistration) { + + type mergedCheck struct { + check *consul.AgentCheckRegistration + // 'l' == Nomad local only + // 'e' == equal + // 'c' == changed + // 'a' == Consul agent only + state byte + } + var ( + localChecksCount = 0 + equalChecksCount = 0 + changedChecksCount = 0 + agentChecks = 0 + ) + c.registryLock.RLock() + localChecks := make(map[string]*mergedCheck, len(c.trackedChecks)+len(consulChecks)) + for _, localCheck := range c.flattenedChecks() { + localChecksCount++ + localChecks[localCheck.ID] = &mergedCheck{localCheck, 'l'} + } + c.registryLock.RUnlock() + for _, consulCheck := range consulChecks { + if localCheck, found := localChecks[consulCheck.CheckID]; found { + localChecksCount-- + if compareConsulCheck(localCheck.check, consulCheck) { + equalChecksCount++ + localChecks[consulCheck.CheckID].state = 'e' + } else { + changedChecksCount++ + localChecks[consulCheck.CheckID].state = 'c' + } + } else { + agentChecks++ + agentCheckReg := &consul.AgentCheckRegistration{ + ID: consulCheck.CheckID, + Name: consulCheck.Name, + Notes: consulCheck.Notes, + ServiceID: consulCheck.ServiceID, + } + localChecks[consulCheck.CheckID] = &mergedCheck{agentCheckReg, 'a'} + } + } + + missingChecks = make([]*consul.AgentCheckRegistration, 0, localChecksCount) + equalChecks = make([]*consul.AgentCheckRegistration, 0, equalChecksCount) + changedChecks = make([]*consul.AgentCheckRegistration, 0, changedChecksCount) + staleChecks = make([]*consul.AgentCheckRegistration, 0, agentChecks) + for _, check := range localChecks { + switch check.state { + case 'l': + missingChecks = append(missingChecks, check.check) + case 'e': + equalChecks = append(equalChecks, check.check) + case 'c': + changedChecks = append(changedChecks, check.check) + case 'a': + staleChecks = append(staleChecks, check.check) + } + } + + return missingChecks, equalChecks, changedChecks, staleChecks +} + +// compareConsulService takes a consul.AgentServiceRegistration instance and +// compares it with a consul.AgentService. Returns true if they are equal +// according to consul.AgentService, otherwise false. +func compareConsulService(localService *consul.AgentServiceRegistration, consulService *consul.AgentService) bool { + if consulService.ID != localService.ID || + consulService.Service != localService.Name || + consulService.Port != localService.Port || + consulService.Address != localService.Address || + consulService.EnableTagOverride != localService.EnableTagOverride { + return false + } + + serviceTags := make(map[string]byte, len(localService.Tags)) + for _, tag := range localService.Tags { + serviceTags[tag] = 'l' + } + for _, tag := range consulService.Tags { + if _, found := serviceTags[tag]; !found { + return false + } + serviceTags[tag] = 'b' + } + for _, state := range serviceTags { + if state == 'l' { + return false + } + } + + return true +} + +// calcServicesDiff takes the argument (consulServices) and calculates the +// delta between the consul.Syncer's list of known services +// (c.trackedServices). Four arrays are returned: +// +// 1) a slice of services that exist only locally in the Syncer and are +// missing from the Consul Agent (consulServices) and therefore need to be +// registered. 
+// +// 2) a slice of services that exist in both the local consul.Syncer's +// tracked list and Consul Agent (consulServices) *AND* are identical. +// +// 3) a slice of services that exist in both the local consul.Syncer's +// tracked list and Consul Agent (consulServices) but have diverged state. +// +// 4) a slice of services that exist only in the Consul Agent +// (consulServices) and should be removed because the Consul Agent has +// drifted from the Syncer. +func (c *Syncer) calcServicesDiff(consulServices map[consulServiceID]*consul.AgentService) (missingServices []*consul.AgentServiceRegistration, equalServices []*consul.AgentServiceRegistration, changedServices []*consul.AgentServiceRegistration, staleServices []*consul.AgentServiceRegistration) { + type mergedService struct { + service *consul.AgentServiceRegistration + // 'l' == Nomad local only + // 'e' == equal + // 'c' == changed + // 'a' == Consul agent only + state byte + } + var ( + localServicesCount = 0 + equalServicesCount = 0 + changedServicesCount = 0 + agentServices = 0 + ) + c.registryLock.RLock() + localServices := make(map[string]*mergedService, len(c.trackedServices)+len(consulServices)) + c.registryLock.RUnlock() + for _, localService := range c.flattenedServices() { + localServicesCount++ + localServices[localService.ID] = &mergedService{localService, 'l'} + } + for _, consulService := range consulServices { + if localService, found := localServices[consulService.ID]; found { + localServicesCount-- + if compareConsulService(localService.service, consulService) { + equalServicesCount++ + localServices[consulService.ID].state = 'e' + } else { + changedServicesCount++ + localServices[consulService.ID].state = 'c' + } + } else { + agentServices++ + agentServiceReg := &consul.AgentServiceRegistration{ + ID: consulService.ID, + Name: consulService.Service, + Tags: consulService.Tags, + Port: consulService.Port, + Address: consulService.Address, + } + localServices[consulService.ID] = &mergedService{agentServiceReg, 'a'} + } + } + + missingServices = make([]*consul.AgentServiceRegistration, 0, localServicesCount) + equalServices = make([]*consul.AgentServiceRegistration, 0, equalServicesCount) + changedServices = make([]*consul.AgentServiceRegistration, 0, changedServicesCount) + staleServices = make([]*consul.AgentServiceRegistration, 0, agentServices) + for _, service := range localServices { + switch service.state { + case 'l': + missingServices = append(missingServices, service.service) + case 'e': + equalServices = append(equalServices, service.service) + case 'c': + changedServices = append(changedServices, service.service) + case 'a': + staleServices = append(staleServices, service.service) + } + } + + return missingServices, equalServices, changedServices, staleServices +} + +// syncServices synchronizes this Syncer's Consul Services with the Consul +// Agent. 
+func (c *Syncer) syncServices() error { + consulServices, err := c.queryAgentServices() + if err != nil { + return err + } + + // Synchronize services with Consul + var mErr multierror.Error + missingServices, _, changedServices, removedServices := c.calcServicesDiff(consulServices) + for _, service := range missingServices { + if err := c.client.Agent().ServiceRegister(service); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + c.registryLock.Lock() + c.trackedServices[consulServiceID(service.ID)] = service + c.registryLock.Unlock() + } + for _, service := range changedServices { + // Re-register the local service + if err := c.client.Agent().ServiceRegister(service); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + for _, service := range removedServices { + if err := c.deregisterService(service.ID); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + c.registryLock.Lock() + delete(c.trackedServices, consulServiceID(service.ID)) + c.registryLock.Unlock() + } + return mErr.ErrorOrNil() +} + +// registerCheck registers a check definition with Consul +func (c *Syncer) registerCheck(chkReg *consul.AgentCheckRegistration) error { + c.registryLock.RLock() + if cr, ok := c.checkRunners[consulCheckID(chkReg.ID)]; ok { + cr.Start() + } + c.registryLock.RUnlock() + return c.client.Agent().CheckRegister(chkReg) +} + +// createCheckReg creates a Check that can be registered with Nomad. It also +// creates a Nomad check for the check types that it can handle. +func (c *Syncer) createCheckReg(check *structs.ServiceCheck, serviceReg *consul.AgentServiceRegistration) (*consul.AgentCheckRegistration, error) { + chkReg := consul.AgentCheckRegistration{ + ID: check.Hash(serviceReg.ID), + Name: check.Name, + ServiceID: serviceReg.ID, + } + chkReg.Timeout = check.Timeout.String() + chkReg.Interval = check.Interval.String() + host, port := serviceReg.Address, serviceReg.Port + if check.PortLabel != "" { + host, port = c.addrFinder(check.PortLabel) + } + switch check.Type { + case structs.ServiceCheckHTTP: + if check.Protocol == "" { + check.Protocol = "http" + } + base := url.URL{ + Scheme: check.Protocol, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + } + relative, err := url.Parse(check.Path) + if err != nil { + return nil, err + } + url := base.ResolveReference(relative) + chkReg.HTTP = url.String() + case structs.ServiceCheckTCP: + chkReg.TCP = net.JoinHostPort(host, strconv.Itoa(port)) + case structs.ServiceCheckScript: + chkReg.TTL = (check.Interval + ttlCheckBuffer).String() + default: + return nil, fmt.Errorf("check type %+q not valid", check.Type) + } + chkReg.Status = check.InitialStatus + return &chkReg, nil +} + +// generateConsulServiceID takes the domain and service key and returns a Consul +// ServiceID +func generateConsulServiceID(domain ServiceDomain, key ServiceKey) consulServiceID { + return consulServiceID(fmt.Sprintf("%s-%s-%s", nomadServicePrefix, domain, key)) +} + +// createService creates a Consul AgentService from a Nomad ConsulService. 
+func (c *Syncer) createService(service *structs.Service, domain ServiceDomain, key ServiceKey) (*consul.AgentServiceRegistration, error) { + c.registryLock.RLock() + defer c.registryLock.RUnlock() + + srv := consul.AgentServiceRegistration{ + ID: string(generateConsulServiceID(domain, key)), + Name: service.Name, + Tags: service.Tags, + } + host, port := c.addrFinder(service.PortLabel) + if host != "" { + srv.Address = host + } + + if port != 0 { + srv.Port = port + } + + return &srv, nil +} + +// deregisterService de-registers a service with the given ID from consul +func (c *Syncer) deregisterService(serviceID string) error { + return c.client.Agent().ServiceDeregister(serviceID) +} + +// deregisterCheck de-registers a check from Consul +func (c *Syncer) deregisterCheck(id consulCheckID) error { + c.registryLock.Lock() + defer c.registryLock.Unlock() + + // Deleting from Consul Agent + if err := c.client.Agent().CheckDeregister(string(id)); err != nil { + // CheckDeregister() will be reattempted again in a future + // sync. + return err + } + + // Remove the check from the local registry + if cr, ok := c.checkRunners[id]; ok { + cr.Stop() + delete(c.checkRunners, id) + } + + return nil +} + +// Run triggers periodic syncing of services and checks with Consul. This is +// a long lived go-routine which is stopped during shutdown. +func (c *Syncer) Run() { + sync := time.NewTimer(0) + for { + select { + case <-sync.C: + d := syncInterval - lib.RandomStagger(syncInterval/syncJitter) + sync.Reset(d) + + if err := c.SyncServices(); err != nil { + if c.consulAvailable { + c.logger.Printf("[DEBUG] consul.syncer: error in syncing: %v", err) + } + c.consulAvailable = false + } else { + if !c.consulAvailable { + c.logger.Printf("[DEBUG] consul.syncer: syncs succesful") + } + c.consulAvailable = true + } + case <-c.notifySyncCh: + sync.Reset(syncInterval) + case <-c.shutdownCh: + c.Shutdown() + case <-c.notifyShutdownCh: + sync.Stop() + c.logger.Printf("[INFO] consul.syncer: shutting down syncer ") + return + } + } +} + +// RunHandlers executes each handler (randomly) +func (c *Syncer) RunHandlers() error { + c.periodicLock.RLock() + handlers := make(map[string]types.PeriodicCallback, len(c.periodicCallbacks)) + for name, fn := range c.periodicCallbacks { + handlers[name] = fn + } + c.periodicLock.RUnlock() + + var mErr multierror.Error + for _, fn := range handlers { + if err := fn(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + return mErr.ErrorOrNil() +} + +// SyncServices sync the services with the Consul Agent +func (c *Syncer) SyncServices() error { + var mErr multierror.Error + if err := c.syncServices(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + if err := c.syncChecks(); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + if err := c.RunHandlers(); err != nil { + return err + } + return mErr.ErrorOrNil() +} + +// filterConsulServices prunes out all the service who were not registered with +// the syncer +func (c *Syncer) filterConsulServices(consulServices map[string]*consul.AgentService) map[consulServiceID]*consul.AgentService { + localServices := make(map[consulServiceID]*consul.AgentService, len(consulServices)) + c.registryLock.RLock() + defer c.registryLock.RUnlock() + for serviceID, service := range consulServices { + for domain := range c.servicesGroups { + if strings.HasPrefix(service.ID, fmt.Sprintf("%s-%s", nomadServicePrefix, domain)) { + localServices[consulServiceID(serviceID)] = service + break + } + } + } + return localServices +} 
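The check and service diff functions above share one pattern: merge the local and remote views into a single map, tag each entry with a one-byte state ('l' local-only, 'e' equal, 'c' changed, 'a' agent-only), then bucket the entries by state so each slice can be handled separately during the sync. A minimal standalone sketch of that pattern follows; the record type and the calcDiff helper are hypothetical stand-ins for the Consul registration structs, not part of the vendored syncer.

package main

import "fmt"

type record struct{ ID, Value string }

// calcDiff merges a local and a remote view keyed by ID and classifies every
// entry as missing (local only), equal, changed, or stale (remote only).
func calcDiff(local, remote map[string]record) (missing, equal, changed, stale []record) {
	type merged struct {
		rec   record
		state byte // 'l' local only, 'e' equal, 'c' changed, 'r' remote only
	}
	all := make(map[string]*merged, len(local)+len(remote))
	for id, rec := range local {
		all[id] = &merged{rec, 'l'}
	}
	for id, rec := range remote {
		if m, ok := all[id]; ok {
			if m.rec.Value == rec.Value {
				m.state = 'e'
			} else {
				m.state = 'c'
			}
		} else {
			all[id] = &merged{rec, 'r'}
		}
	}
	for _, m := range all {
		switch m.state {
		case 'l':
			missing = append(missing, m.rec)
		case 'e':
			equal = append(equal, m.rec)
		case 'c':
			changed = append(changed, m.rec)
		case 'r':
			stale = append(stale, m.rec)
		}
	}
	return
}

func main() {
	local := map[string]record{"a": {"a", "1"}, "b": {"b", "2"}}
	remote := map[string]record{"b": {"b", "3"}, "c": {"c", "4"}}
	missing, equal, changed, stale := calcDiff(local, remote)
	fmt.Println(len(missing), len(equal), len(changed), len(stale)) // 1 0 1 1
}

Because each side is walked exactly once over the shared map, the vendored code can also pre-count each bucket (localChecksCount, equalChecksCount, and so on) and size the result slices before the final classification pass.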
+ +// filterConsulChecks prunes out all the consul checks which do not have +// services with Syncer's idPrefix. +func (c *Syncer) filterConsulChecks(consulChecks map[string]*consul.AgentCheck) map[consulCheckID]*consul.AgentCheck { + localChecks := make(map[consulCheckID]*consul.AgentCheck, len(consulChecks)) + c.registryLock.RLock() + defer c.registryLock.RUnlock() + for checkID, check := range consulChecks { + for domain := range c.checkGroups { + if strings.HasPrefix(check.ServiceID, fmt.Sprintf("%s-%s", nomadServicePrefix, domain)) { + localChecks[consulCheckID(checkID)] = check + break + } + } + } + return localChecks +} + +// consulPresent indicates whether the Consul Agent is responding +func (c *Syncer) consulPresent() bool { + _, err := c.client.Agent().Self() + return err == nil +} + +// runCheck runs a check and updates the corresponding ttl check in consul +func (c *Syncer) runCheck(check Check) { + res := check.Run() + if res.Duration >= check.Timeout() { + c.logger.Printf("[DEBUG] consul.syncer: check took time: %v, timeout: %v", res.Duration, check.Timeout()) + } + state := consul.HealthCritical + output := res.Output + switch res.ExitCode { + case 0: + state = consul.HealthPassing + case 1: + state = consul.HealthWarning + default: + state = consul.HealthCritical + } + if res.Err != nil { + state = consul.HealthCritical + output = res.Err.Error() + } + if err := c.client.Agent().UpdateTTL(check.ID(), output, state); err != nil { + if c.consulAvailable { + c.logger.Printf("[DEBUG] consul.syncer: check %+q failed, disabling Consul checks until until next successful sync: %v", check.ID(), err) + c.consulAvailable = false + } else { + c.consulAvailable = true + } + } +} + +// ReapUnmatched prunes all services that do not exist in the passed domains +func (c *Syncer) ReapUnmatched(domains []ServiceDomain) error { + servicesInConsul, err := c.ConsulClient().Agent().Services() + if err != nil { + return err + } + + var mErr multierror.Error + for serviceID := range servicesInConsul { + // Skip any service that was not registered by Nomad + if !strings.HasPrefix(serviceID, nomadServicePrefix) { + continue + } + + // Filter services that do not exist in the desired domains + match := false + for _, domain := range domains { + // Include the hyphen so it is explicit to that domain otherwise it + // maybe a subset match + desired := fmt.Sprintf("%s-%s-", nomadServicePrefix, domain) + if strings.HasPrefix(serviceID, desired) { + match = true + break + } + } + + if !match { + if err := c.deregisterService(serviceID); err != nil { + mErr.Errors = append(mErr.Errors, err) + } + } + } + + return mErr.ErrorOrNil() +} + +// AddPeriodicHandler adds a uniquely named callback. Returns true if +// successful, false if a handler with the same name already exists. +func (c *Syncer) AddPeriodicHandler(name string, fn types.PeriodicCallback) bool { + c.periodicLock.Lock() + defer c.periodicLock.Unlock() + if _, found := c.periodicCallbacks[name]; found { + c.logger.Printf("[ERROR] consul.syncer: failed adding handler %+q", name) + return false + } + c.periodicCallbacks[name] = fn + return true +} + +// NumHandlers returns the number of callbacks registered with the syncer +func (c *Syncer) NumHandlers() int { + c.periodicLock.RLock() + defer c.periodicLock.RUnlock() + return len(c.periodicCallbacks) +} + +// RemovePeriodicHandler removes a handler with a given name. 
+func (c *Syncer) RemovePeriodicHandler(name string) { + c.periodicLock.Lock() + defer c.periodicLock.Unlock() + delete(c.periodicCallbacks, name) +} + +// ConsulClient returns the Consul client used by the Syncer. +func (c *Syncer) ConsulClient() *consul.Client { + return c.client +} diff --git a/vendor/github.com/hashicorp/nomad/helper/discover/discover.go b/vendor/github.com/hashicorp/nomad/helper/discover/discover.go new file mode 100644 index 000000000..8582a0133 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/discover/discover.go @@ -0,0 +1,60 @@ +package discover + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" + + "github.com/kardianos/osext" +) + +// Checks the current executable, then $GOPATH/bin, and finally the CWD, in that +// order. If it can't be found, an error is returned. +func NomadExecutable() (string, error) { + nomadExe := "nomad" + if runtime.GOOS == "windows" { + nomadExe = "nomad.exe" + } + + // Check the current executable. + bin, err := osext.Executable() + if err != nil { + return "", fmt.Errorf("Failed to determine the nomad executable: %v", err) + } + + if filepath.Base(bin) == nomadExe { + return bin, nil + } + + // Check the $PATH + if bin, err := exec.LookPath(nomadExe); err == nil { + return bin, nil + } + + // Check the $GOPATH. + bin = filepath.Join(os.Getenv("GOPATH"), "bin", nomadExe) + if _, err := os.Stat(bin); err == nil { + return bin, nil + } + + // Check the CWD. + pwd, err := os.Getwd() + if err != nil { + return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err) + } + + bin = filepath.Join(pwd, nomadExe) + if _, err := os.Stat(bin); err == nil { + return bin, nil + } + + // Check CWD/bin + bin = filepath.Join(pwd, "bin", nomadExe) + if _, err := os.Stat(bin); err == nil { + return bin, nil + } + + return "", fmt.Errorf("Could not find Nomad executable (%v)", nomadExe) +} diff --git a/vendor/github.com/hashicorp/nomad/helper/fields/data.go b/vendor/github.com/hashicorp/nomad/helper/fields/data.go new file mode 100644 index 000000000..fb22bbc59 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/fields/data.go @@ -0,0 +1,169 @@ +package fields + +import ( + "fmt" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/mapstructure" +) + +// FieldData contains the raw data and the schema that the data should adhere to +type FieldData struct { + Raw map[string]interface{} + Schema map[string]*FieldSchema +} + +// Validate cycles through the raw data and validates conversions in the schema. +// It also checks for the existence and value of required fields. 
+func (d *FieldData) Validate() error { + var result *multierror.Error + + // Scan for missing required fields + for field, schema := range d.Schema { + if schema.Required { + _, ok := d.Raw[field] + if !ok { + result = multierror.Append(result, fmt.Errorf( + "field %q is required", field)) + } + } + } + + // Validate field type and value + for field, value := range d.Raw { + schema, ok := d.Schema[field] + if !ok { + result = multierror.Append(result, fmt.Errorf( + "%q is an invalid field", field)) + continue + } + + switch schema.Type { + case TypeBool, TypeInt, TypeMap, TypeArray, TypeString: + val, _, err := d.getPrimitive(field, schema) + if err != nil { + result = multierror.Append(result, fmt.Errorf( + "field %q with input %q doesn't seem to be of type %s", + field, value, schema.Type)) + } + // Check that we don't have an empty value for required fields + if schema.Required && val == schema.Type.Zero() { + result = multierror.Append(result, fmt.Errorf( + "field %q is required, but no value was found", field)) + } + default: + result = multierror.Append(result, fmt.Errorf( + "unknown field type %s for field %s", schema.Type, field)) + } + } + + return result.ErrorOrNil() +} + +// Get gets the value for the given field. If the key is an invalid field, +// FieldData will panic. If you want a safer version of this method, use +// GetOk. If the field k is not set, the default value (if set) will be +// returned, otherwise the zero value will be returned. +func (d *FieldData) Get(k string) interface{} { + schema, ok := d.Schema[k] + if !ok { + panic(fmt.Sprintf("field %s not in the schema", k)) + } + + value, ok := d.GetOk(k) + if !ok { + value = schema.DefaultOrZero() + } + + return value +} + +// GetOk gets the value for the given field. The second return value +// will be false if the key is invalid or the key is not set at all. +func (d *FieldData) GetOk(k string) (interface{}, bool) { + schema, ok := d.Schema[k] + if !ok { + return nil, false + } + + result, ok, err := d.GetOkErr(k) + if err != nil { + panic(fmt.Sprintf("error reading %s: %s", k, err)) + } + + if ok && result == nil { + result = schema.DefaultOrZero() + } + + return result, ok +} + +// GetOkErr is the most conservative of all the Get methods. It returns +// whether key is set or not, but also an error value. The error value is +// non-nil if the field doesn't exist or there was an error parsing the +// field value. +func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) { + schema, ok := d.Schema[k] + if !ok { + return nil, false, fmt.Errorf("unknown field: %s", k) + } + + switch schema.Type { + case TypeBool, TypeInt, TypeMap, TypeArray, TypeString: + return d.getPrimitive(k, schema) + default: + return nil, false, + fmt.Errorf("unknown field type %s for field %s", schema.Type, k) + } +} + +// getPrimitive tries to convert the raw value of a field to its data type as +// defined in the schema. It does strict type checking, so the value will need +// to be able to convert to the appropriate type directly. 
+func (d *FieldData) getPrimitive( + k string, schema *FieldSchema) (interface{}, bool, error) { + raw, ok := d.Raw[k] + if !ok { + return nil, false, nil + } + + switch schema.Type { + case TypeBool: + var result bool + if err := mapstructure.Decode(raw, &result); err != nil { + return nil, true, err + } + return result, true, nil + + case TypeInt: + var result int + if err := mapstructure.Decode(raw, &result); err != nil { + return nil, true, err + } + return result, true, nil + + case TypeString: + var result string + if err := mapstructure.Decode(raw, &result); err != nil { + return nil, true, err + } + return result, true, nil + + case TypeMap: + var result map[string]interface{} + if err := mapstructure.Decode(raw, &result); err != nil { + return nil, true, err + } + return result, true, nil + + case TypeArray: + var result []interface{} + if err := mapstructure.Decode(raw, &result); err != nil { + return nil, true, err + } + return result, true, nil + + default: + panic(fmt.Sprintf("Unknown type: %s", schema.Type)) + } +} diff --git a/vendor/github.com/hashicorp/nomad/helper/fields/schema.go b/vendor/github.com/hashicorp/nomad/helper/fields/schema.go new file mode 100644 index 000000000..f57a97685 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/fields/schema.go @@ -0,0 +1,19 @@ +package fields + +// FieldSchema is a basic schema to describe the format of a configuration field +type FieldSchema struct { + Type FieldType + Default interface{} + Description string + Required bool +} + +// DefaultOrZero returns the default value if it is set, or otherwise +// the zero value of the type. +func (s *FieldSchema) DefaultOrZero() interface{} { + if s.Default != nil { + return s.Default + } + + return s.Type.Zero() +} diff --git a/vendor/github.com/hashicorp/nomad/helper/fields/type.go b/vendor/github.com/hashicorp/nomad/helper/fields/type.go new file mode 100644 index 000000000..dced1b186 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/fields/type.go @@ -0,0 +1,47 @@ +package fields + +// FieldType is the enum of types that a field can be. 
+type FieldType uint + +const ( + TypeInvalid FieldType = 0 + TypeString FieldType = iota + TypeInt + TypeBool + TypeMap + TypeArray +) + +func (t FieldType) String() string { + switch t { + case TypeString: + return "string" + case TypeInt: + return "integer" + case TypeBool: + return "boolean" + case TypeMap: + return "map" + case TypeArray: + return "array" + default: + return "unknown type" + } +} + +func (t FieldType) Zero() interface{} { + switch t { + case TypeString: + return "" + case TypeInt: + return 0 + case TypeBool: + return false + case TypeMap: + return map[string]interface{}{} + case TypeArray: + return []interface{}{} + default: + panic("unknown type: " + t.String()) + } +} diff --git a/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go b/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go new file mode 100644 index 000000000..9c0cd72d8 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/helper/stats/cpu.go @@ -0,0 +1,67 @@ +package stats + +import ( + "fmt" + "math" + "sync" + + "github.com/shirou/gopsutil/cpu" +) + +var ( + cpuMhzPerCore float64 + cpuModelName string + cpuNumCores int + cpuTotalTicks float64 + + onceLer sync.Once +) + +func Init() error { + var err error + onceLer.Do(func() { + if cpuNumCores, err = cpu.Counts(true); err != nil { + err = fmt.Errorf("Unable to determine the number of CPU cores available: %v", err) + return + } + + var cpuInfo []cpu.InfoStat + if cpuInfo, err = cpu.Info(); err != nil { + err = fmt.Errorf("Unable to obtain CPU information: %v", err) + return + } + + for _, cpu := range cpuInfo { + cpuModelName = cpu.ModelName + cpuMhzPerCore = cpu.Mhz + break + } + + // Floor all of the values such that small difference don't cause the + // node to fall into a unique computed node class + cpuMhzPerCore = math.Floor(cpuMhzPerCore) + cpuTotalTicks = math.Floor(float64(cpuNumCores) * cpuMhzPerCore) + }) + return err +} + +// CPUModelName returns the number of CPU cores available +func CPUNumCores() int { + return cpuNumCores +} + +// CPUMHzPerCore returns the MHz per CPU core +func CPUMHzPerCore() float64 { + return cpuMhzPerCore +} + +// CPUModelName returns the model name of the CPU +func CPUModelName() string { + return cpuModelName +} + +// TotalTicksAvailable calculates the total frequency available across all +// cores +func TotalTicksAvailable() float64 { + return cpuTotalTicks +} diff --git a/vendor/github.com/hashicorp/nomad/jobspec/parse.go b/vendor/github.com/hashicorp/nomad/jobspec/parse.go index 00b727822..f9defa7cf 100644 --- a/vendor/github.com/hashicorp/nomad/jobspec/parse.go +++ b/vendor/github.com/hashicorp/nomad/jobspec/parse.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" + "github.com/hashicorp/nomad/client/driver" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/mapstructure" ) @@ -630,6 +631,22 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l return err } } + + // Instantiate a driver to validate the configuration + d, err := driver.NewDriver( + t.Driver, + driver.NewEmptyDriverContext(), + ) + + if err != nil { + return multierror.Prefix(err, + fmt.Sprintf("'%s', config ->", n)) + } + + if err := d.Validate(t.Config); err != nil { + return multierror.Prefix(err, + fmt.Sprintf("'%s', config ->", n)) + } } // Parse constraints diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/config/README.md b/vendor/github.com/hashicorp/nomad/nomad/structs/config/README.md new file mode 
100644 index 000000000..c75016932 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/config/README.md @@ -0,0 +1,7 @@ +# Overview + +`nomad/structs/config` is a package for configuration `struct`s that are +shared among packages that needs the same `struct` definitions, but can't +import each other without creating a cyle. This `config` package must be +terminal in the import graph (or very close to terminal in the dependency +graph). diff --git a/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go b/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go new file mode 100644 index 000000000..7f673a5c4 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/structs/config/consul.go @@ -0,0 +1,190 @@ +package config + +import ( + "crypto/tls" + "fmt" + "net/http" + "strings" + "time" + + consul "github.com/hashicorp/consul/api" +) + +// ConsulConfig contains the configuration information necessary to +// communicate with a Consul Agent in order to: +// +// - Register services and their checks with Consul +// +// - Bootstrap this Nomad Client with the list of Nomad Servers registered +// with Consul +// +// Both the Agent and the executor need to be able to import ConsulConfig. +type ConsulConfig struct { + // ServerServiceName is the name of the service that Nomad uses to register + // servers with Consul + ServerServiceName string `mapstructure:"server_service_name"` + + // ClientServiceName is the name of the service that Nomad uses to register + // clients with Consul + ClientServiceName string `mapstructure:"client_service_name"` + + // AutoAdvertise determines if this Nomad Agent will advertise its + // services via Consul. When true, Nomad Agent will register + // services with Consul. + AutoAdvertise bool `mapstructure:"auto_advertise"` + + // Addr is the address of the local Consul agent + Addr string `mapstructure:"address"` + + // Timeout is used by Consul HTTP Client + Timeout time.Duration `mapstructure:"timeout"` + + // Token is used to provide a per-request ACL token. This options overrides + // the agent's default token + Token string `mapstructure:"token"` + + // Auth is the information to use for http access to Consul agent + Auth string `mapstructure:"auth"` + + // EnableSSL sets the transport scheme to talk to the Consul agent as https + EnableSSL bool `mapstructure:"ssl"` + + // VerifySSL enables or disables SSL verification when the transport scheme + // for the consul api client is https + VerifySSL bool `mapstructure:"verify_ssl"` + + // CAFile is the path to the ca certificate used for Consul communication + CAFile string `mapstructure:"ca_file"` + + // CertFile is the path to the certificate for Consul communication + CertFile string `mapstructure:"cert_file"` + + // KeyFile is the path to the private key for Consul communication + KeyFile string `mapstructure:"key_file"` + + // ServerAutoJoin enables Nomad servers to find peers by querying Consul and + // joining them + ServerAutoJoin bool `mapstructure:"server_auto_join"` + + // ClientAutoJoin enables Nomad servers to find addresses of Nomad servers + // and register with them + ClientAutoJoin bool `mapstructure:"client_auto_join"` +} + +// DefaultConsulConfig() returns the canonical defaults for the Nomad +// `consul` configuration. 
+func DefaultConsulConfig() *ConsulConfig { + return &ConsulConfig{ + ServerServiceName: "nomad", + ClientServiceName: "nomad-client", + AutoAdvertise: true, + ServerAutoJoin: true, + ClientAutoJoin: true, + Timeout: 5 * time.Second, + } +} + +// Merge merges two Consul Configurations together. +func (a *ConsulConfig) Merge(b *ConsulConfig) *ConsulConfig { + result := *a + + if b.ServerServiceName != "" { + result.ServerServiceName = b.ServerServiceName + } + if b.ClientServiceName != "" { + result.ClientServiceName = b.ClientServiceName + } + if b.AutoAdvertise { + result.AutoAdvertise = true + } + if b.Addr != "" { + result.Addr = b.Addr + } + if b.Timeout != 0 { + result.Timeout = b.Timeout + } + if b.Token != "" { + result.Token = b.Token + } + if b.Auth != "" { + result.Auth = b.Auth + } + if b.EnableSSL { + result.EnableSSL = true + } + if b.VerifySSL { + result.VerifySSL = true + } + if b.CAFile != "" { + result.CAFile = b.CAFile + } + if b.CertFile != "" { + result.CertFile = b.CertFile + } + if b.KeyFile != "" { + result.KeyFile = b.KeyFile + } + if b.ServerAutoJoin { + result.ServerAutoJoin = true + } + if b.ClientAutoJoin { + result.ClientAutoJoin = true + } + return &result +} + +// ApiConfig() returns a usable Consul config that can be passed directly to +// hashicorp/consul/api. NOTE: datacenter is not set +func (c *ConsulConfig) ApiConfig() (*consul.Config, error) { + config := consul.DefaultConfig() + if c.Addr != "" { + config.Address = c.Addr + } + if c.Token != "" { + config.Token = c.Token + } + if c.Timeout != 0 { + config.HttpClient.Timeout = c.Timeout + } + if c.Auth != "" { + var username, password string + if strings.Contains(c.Auth, ":") { + split := strings.SplitN(c.Auth, ":", 2) + username = split[0] + password = split[1] + } else { + username = c.Auth + } + + config.HttpAuth = &consul.HttpBasicAuth{ + Username: username, + Password: password, + } + } + if c.EnableSSL { + config.Scheme = "https" + tlsConfig := consul.TLSConfig{ + Address: config.Address, + CAFile: c.CAFile, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + InsecureSkipVerify: !c.VerifySSL, + } + tlsClientCfg, err := consul.SetupTLSConfig(&tlsConfig) + if err != nil { + return nil, fmt.Errorf("error creating tls client config for consul: %v", err) + } + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: tlsClientCfg, + } + } + if c.EnableSSL && !c.VerifySSL { + config.HttpClient.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + } + } + + return config, nil +} diff --git a/vendor/github.com/hashicorp/nomad/nomad/types/types.go b/vendor/github.com/hashicorp/nomad/nomad/types/types.go new file mode 100644 index 000000000..2a05ddbb3 --- /dev/null +++ b/vendor/github.com/hashicorp/nomad/nomad/types/types.go @@ -0,0 +1,3 @@ +package types + +type PeriodicCallback func() error diff --git a/vendor/github.com/hashicorp/yamux/.gitignore b/vendor/github.com/hashicorp/yamux/.gitignore deleted file mode 100644 index 836562412..000000000 --- a/vendor/github.com/hashicorp/yamux/.gitignore +++ /dev/null @@ -1,23 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test diff --git a/vendor/github.com/hmrc/vmware-govcd/.editorconfig b/vendor/github.com/hmrc/vmware-govcd/.editorconfig deleted file mode 100644 index 
3152da69a..000000000 --- a/vendor/github.com/hmrc/vmware-govcd/.editorconfig +++ /dev/null @@ -1,26 +0,0 @@ -# top-most EditorConfig file -root = true - -# Unix-style newlines with a newline ending every file -[*] -end_of_line = lf -insert_final_newline = true -indent_style = space -indent_size = 2 -trim_trailing_whitespace = true - -# Set default charset -[*.{js,py,go,scala,rb,java,html,css,less,sass,md}] -charset = utf-8 - -# Tab indentation (no size specified) -[*.go] -indent_style = tab - -[*.md] -trim_trailing_whitespace = false - -# Matches the exact files either package.json or .travis.yml -[{package.json,.travis.yml}] -indent_style = space -indent_size = 2 diff --git a/vendor/github.com/hmrc/vmware-govcd/.gitignore b/vendor/github.com/hmrc/vmware-govcd/.gitignore deleted file mode 100644 index 975162838..000000000 --- a/vendor/github.com/hmrc/vmware-govcd/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test -.cover - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof \ No newline at end of file diff --git a/vendor/github.com/hmrc/vmware-govcd/.travis.yml b/vendor/github.com/hmrc/vmware-govcd/.travis.yml deleted file mode 100644 index d179553b3..000000000 --- a/vendor/github.com/hmrc/vmware-govcd/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -before_install: - - sudo apt-get update -qq - - sudo apt-get install -y figlet cowsay -sudo: required -language: go -install: -- go get -t ./... -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -script: -- PATH="$HOME/gopath/bin:$PATH" -- script/coverage --coveralls -after_success: -- figlet "Build Successful!" | /usr/games/cowsay -n -e "^^" -after_failure: -- figlet "Build Failed!" 
| /usr/games/cowsay -n -s diff --git a/vendor/github.com/imdario/mergo/.travis.yml b/vendor/github.com/imdario/mergo/.travis.yml deleted file mode 100644 index 9d91c6339..000000000 --- a/vendor/github.com/imdario/mergo/.travis.yml +++ /dev/null @@ -1,2 +0,0 @@ -language: go -install: go get -t diff --git a/vendor/github.com/influxdata/influxdb/client/influxdb.go b/vendor/github.com/influxdata/influxdb/client/influxdb.go index b82393290..2f62aacb8 100644 --- a/vendor/github.com/influxdata/influxdb/client/influxdb.go +++ b/vendor/github.com/influxdata/influxdb/client/influxdb.go @@ -1,4 +1,4 @@ -package client +package client // import "github.com/influxdata/influxdb/client" import ( "bytes" diff --git a/vendor/github.com/influxdata/influxdb/models/points.go b/vendor/github.com/influxdata/influxdb/models/points.go index f9ff203f0..cde7391bc 100644 --- a/vendor/github.com/influxdata/influxdb/models/points.go +++ b/vendor/github.com/influxdata/influxdb/models/points.go @@ -1,4 +1,4 @@ -package models +package models // import "github.com/influxdata/influxdb/models" import ( "bytes" diff --git a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go index 15e9cf29d..bd0262b21 100644 --- a/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go +++ b/vendor/github.com/influxdata/influxdb/pkg/escape/bytes.go @@ -1,4 +1,4 @@ -package escape +package escape // import "github.com/influxdata/influxdb/pkg/escape" import "bytes" diff --git a/vendor/github.com/jmespath/go-jmespath/.gitignore b/vendor/github.com/jmespath/go-jmespath/.gitignore deleted file mode 100644 index 531fcc11c..000000000 --- a/vendor/github.com/jmespath/go-jmespath/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -jpgo -jmespath-fuzz.zip -cpu.out -go-jmespath.test diff --git a/vendor/github.com/jmespath/go-jmespath/.travis.yml b/vendor/github.com/jmespath/go-jmespath/.travis.yml deleted file mode 100644 index 1f9807757..000000000 --- a/vendor/github.com/jmespath/go-jmespath/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -sudo: false - -go: - - 1.4 - -install: go get -v -t ./... -script: make test diff --git a/vendor/github.com/joyent/gocommon/.gitignore b/vendor/github.com/joyent/gocommon/.gitignore deleted file mode 100644 index 8fde1319c..000000000 --- a/vendor/github.com/joyent/gocommon/.gitignore +++ /dev/null @@ -1,26 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe - -# IntelliJ files -.idea -*.iml \ No newline at end of file diff --git a/vendor/github.com/jtopjian/cobblerclient/.gitignore b/vendor/github.com/jtopjian/cobblerclient/.gitignore deleted file mode 100644 index ead84456e..000000000 --- a/vendor/github.com/jtopjian/cobblerclient/.gitignore +++ /dev/null @@ -1 +0,0 @@ -**/*.swp diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go index c0de8b7f5..17f380f0e 100644 --- a/vendor/github.com/kardianos/osext/osext.go +++ b/vendor/github.com/kardianos/osext/osext.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Extensions to the standard "os" package. 
-package osext +package osext // import "github.com/kardianos/osext" import "path/filepath" diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md new file mode 100644 index 000000000..5eb9e1445 --- /dev/null +++ b/vendor/github.com/lib/pq/README.md @@ -0,0 +1,105 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![Build Status](https://travis-ci.org/lib/pq.png?branch=master)](https://travis-ci.org/lib/pq) + +## Install + + go get github.com/lib/pq + +## Docs + +For detailed documentation and basic usage examples, please see the package +documentation at . + +## Tests + +`go test` is used for testing. A running PostgreSQL server is +required, with the ability to log in. The default database to connect +to test with is "pqgotest," but it can be overridden using environment +variables. + +Example: + + PGHOST=/run/postgresql go test github.com/lib/pq + +Optionally, a benchmark suite can be run as part of the tests: + + PGHOST=/run/postgresql go test -bench . + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support + +## Future / Things you can help with + +* Better COPY FROM / COPY TO (see discussion in #181) + +## Thank you (alphabetical) + +Some of these contributors are from the original library `bmizerany/pq.go` whose +code still exists in here. + +* Andy Balholm (andybalholm) +* Ben Berkert (benburkert) +* Benjamin Heatwole (bheatwole) +* Bill Mill (llimllib) +* Bjørn Madsen (aeons) +* Blake Gentry (bgentry) +* Brad Fitzpatrick (bradfitz) +* Charlie Melbye (cmelbye) +* Chris Bandy (cbandy) +* Chris Gilling (cgilling) +* Chris Walsh (cwds) +* Dan Sosedoff (sosedoff) +* Daniel Farina (fdr) +* Eric Chlebek (echlebek) +* Eric Garrido (minusnine) +* Eric Urban (hydrogen18) +* Everyone at The Go Team +* Evan Shaw (edsrzf) +* Ewan Chou (coocood) +* Fazal Majid (fazalmajid) +* Federico Romero (federomero) +* Fumin (fumin) +* Gary Burd (garyburd) +* Heroku (heroku) +* James Pozdena (jpoz) +* Jason McVetta (jmcvetta) +* Jeremy Jay (pbnjay) +* Joakim Sernbrant (serbaut) +* John Gallagher (jgallagher) +* Jonathan Rudenberg (titanous) +* Joël Stemmer (jstemmer) +* Kamil Kisiel (kisielk) +* Kelly Dunn (kellydunn) +* Keith Rarick (kr) +* Kir Shatrov (kirs) +* Lann Martin (lann) +* Maciek Sakrejda (uhoh-itsmaciek) +* Marc Brinkmann (mbr) +* Marko Tiikkaja (johto) +* Matt Newberry (MattNewberry) +* Matt Robenolt (mattrobenolt) +* Martin Olsen (martinolsen) +* Mike Lewis (mikelikespie) +* Nicolas Patry (Narsil) +* Oliver Tonnhofer (olt) +* Patrick Hayes (phayes) +* Paul Hammond (paulhammond) +* Ryan Smith (ryandotsmith) +* Samuel Stauffer (samuel) +* Timothée Peignier (cyberdelia) +* Travis Cline (tmc) +* TruongSinh Tran-Nguyen (truongsinh) +* Yaismel Miranda (ympons) +* notedit (notedit) diff --git a/vendor/github.com/macaron-contrib/session/.gitignore b/vendor/github.com/macaron-contrib/session/.gitignore deleted file mode 100644 index 9297dbcd7..000000000 --- a/vendor/github.com/macaron-contrib/session/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -ledis/tmp.db -nodb/tmp.db \ No newline at end of file diff --git a/vendor/github.com/masterzen/simplexml/dom/document.go 
b/vendor/github.com/masterzen/simplexml/dom/document.go index d73693c3f..e871c35a3 100644 --- a/vendor/github.com/masterzen/simplexml/dom/document.go +++ b/vendor/github.com/masterzen/simplexml/dom/document.go @@ -32,4 +32,4 @@ func (doc *Document) String() string { } return string(b.Bytes()) -} +} \ No newline at end of file diff --git a/vendor/github.com/masterzen/simplexml/dom/element.go b/vendor/github.com/masterzen/simplexml/dom/element.go index 8e2795960..1ea16cc97 100644 --- a/vendor/github.com/masterzen/simplexml/dom/element.go +++ b/vendor/github.com/masterzen/simplexml/dom/element.go @@ -197,4 +197,4 @@ func (node *Element) String() string { var b bytes.Buffer node.Bytes(&b, false, "", 0) return string(b.Bytes()) -} +} \ No newline at end of file diff --git a/vendor/github.com/mattn/go-isatty/isatty_appengine.go b/vendor/github.com/mattn/go-isatty/isatty_appengine.go deleted file mode 100644 index 83c588773..000000000 --- a/vendor/github.com/mattn/go-isatty/isatty_appengine.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build appengine - -package isatty - -// IsTerminal returns true if the file descriptor is terminal which -// is always false on on appengine classic which is a sandboxed PaaS. -func IsTerminal(fd uintptr) bool { - return false -} diff --git a/vendor/github.com/mitchellh/colorstring/.travis.yml b/vendor/github.com/mitchellh/colorstring/.travis.yml deleted file mode 100644 index 74e286ae1..000000000 --- a/vendor/github.com/mitchellh/colorstring/.travis.yml +++ /dev/null @@ -1,15 +0,0 @@ -language: go - -go: - - 1.0 - - 1.1 - - 1.2 - - 1.3 - - tip - -script: - - go test - -matrix: - allow_failures: - - go: tip diff --git a/vendor/github.com/mitchellh/go-ps/LICENSE.md b/vendor/github.com/mitchellh/go-ps/LICENSE.md new file mode 100644 index 000000000..229851590 --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/LICENSE.md @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-ps/README.md b/vendor/github.com/mitchellh/go-ps/README.md new file mode 100644 index 000000000..881ad32a2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/README.md @@ -0,0 +1,34 @@ +# Process List Library for Go + +go-ps is a library for Go that implements OS-specific APIs to list and +manipulate processes in a platform-safe way. The library can find and +list processes on Linux, Mac OS X, and Windows. + +If you're new to Go, this library has a good amount of advanced Go educational +value as well. 
It uses some advanced features of Go: build tags, accessing +DLL methods for Windows, cgo for Darwin, etc. + +How it works: + + * **Darwin** uses the `sysctl` syscall to retrieve the process table. + * **Unix** uses the procfs at `/proc` to inspect the process tree. + * **Windows** uses the Windows API, and methods such as + `CreateToolhelp32Snapshot` to get a point-in-time snapshot of + the process table. + +## Installation + +Install using standard `go get`: + +``` +$ go get github.com/mitchellh/go-ps +... +``` + +## TODO + +Want to contribute? Here is a short TODO list of things that aren't +implemented for this library that would be nice: + + * FreeBSD support + * Plan9 support diff --git a/vendor/github.com/mitchellh/go-ps/Vagrantfile b/vendor/github.com/mitchellh/go-ps/Vagrantfile new file mode 100644 index 000000000..61662ab1e --- /dev/null +++ b/vendor/github.com/mitchellh/go-ps/Vagrantfile @@ -0,0 +1,43 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! +VAGRANTFILE_API_VERSION = "2" + +Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| + config.vm.box = "chef/ubuntu-12.04" + + config.vm.provision "shell", inline: $script + + ["vmware_fusion", "vmware_workstation"].each do |p| + config.vm.provider "p" do |v| + v.vmx["memsize"] = "1024" + v.vmx["numvcpus"] = "2" + v.vmx["cpuid.coresPerSocket"] = "1" + end + end +end + +$script = <