Merge pull request #10235 from hashicorp/b-update-nomad

vendor: update to Nomad 0.5
Authored by Mitchell Hashimoto on 2016-11-18 21:33:52 -08:00, committed by GitHub
commit 178355fc43
399 changed files with 1397 additions and 49313 deletions


@ -1,468 +0,0 @@
## 0.4.1
__BACKWARDS INCOMPATIBILITIES:__
* telemetry: Operators will have to explicitly opt in for the Nomad client to
publish allocation and node metrics
IMPROVEMENTS:
* core: Allow count 0 on system jobs [GH-1421]
* core: Summarize the current status of registered jobs. [GH-1383, GH-1517]
* core: Gracefully handle short lived outages by holding RPC calls [GH-1403]
* core: Introduce a lost state for allocations that were on Nodes that died
[GH-1516]
* api: client Logs endpoint for streaming task logs [GH-1444]
* api/cli: Support for tailing/streaming files [GH-1404, GH-1420]
* api/server: Support for querying job summaries [GH-1455]
* cli: `nomad logs` command for streaming task logs [GH-1444]
* cli: `nomad status` shows the create time of allocations [GH-1540]
* cli: `nomad plan` exit code indicates if changes will occur [GH-1502]
* cli: status commands support JSON output and Go template formatting [GH-1503]
* cli: Validate and plan command supports reading from stdin [GH-1460,
GH-1458]
* cli: Allow basic authentication through address and environment variable
[GH-1610]
* cli: `nomad node-status` shows volume name for non-physical volumes instead
of showing 0B used [GH-1538]
* cli: Support retrieving job files using go-getter in the `run`, `plan` and
`validate` command [GH-1511]
* client: Add killing event to task state [GH-1457]
* client: Fingerprint network speed on Windows [GH-1443]
* discovery: Support for initial check status [GH-1599]
* discovery: Support for query params in health check urls [GH-1562]
* driver/docker: Allow working directory to be configured [GH-1513]
* driver/docker: Remove docker volumes when removing container [GH-1519]
* driver/docker: Set windows containers network mode to nat by default
[GH-1521]
* driver/exec: Allow chroot environment to be configurable [GH-1518]
* driver/qemu: Allows users to pass extra args to the qemu driver [GH-1596]
* telemetry: Circonus integration for telemetry metrics [GH-1459]
* telemetry: Allow operators to opt-in for publishing metrics [GH-1501]
BUG FIXES:
* agent: Reload agent configuration on SIGHUP [GH-1566]
* core: Sanitize empty slices/maps in jobs to avoid incorrect create/destroy
updates [GH-1434]
* core: Fix race in which a Node registers and doesn't receive system jobs
[GH-1456]
* core: Fix issue in which Nodes with large amounts of reserved ports would
cause dynamic port allocations to fail [GH-1526]
* core: Fix a condition in which old batch allocations could get updated even
after being marked terminal. In a rare case this could cause a server panic [GH-1471]
* core: Do not update the Job attached to Allocations that have been marked
terminal [GH-1508]
* agent: Fix advertise address when using IPv6 [GH-1465]
* cli: Fix node-status when using IPv6 advertise address [GH-1465]
* client: Task start errors adhere to restart policy mode [GH-1405]
* client: Reregister with servers if node is unregistered [GH-1593]
* client: Killing an allocation doesn't cause allocation stats to block
[GH-1454]
* driver/docker: Disable swap on docker driver [GH-1480]
* driver/docker: Fix improper gating on privileged mode [GH-1506]
* driver/docker: Default network type is "nat" on Windows [GH-1521]
* driver/docker: Cleanup created volume when destroying container [GH-1519]
* driver/rkt: Set host environment variables [GH-1581]
* driver/rkt: Validate the command and trust_prefix configs [GH-1493]
* plan: Plan on system jobs discounts nodes that do not meet required
constraints [GH-1568]
## 0.4.0
__BACKWARDS INCOMPATIBILITIES:__
* api: Tasks are no longer allowed to have slashes in their name [GH-1210]
* cli: Remove the eval-monitor command. Users should switch to `nomad
eval-status -monitor`.
* config: Consul configuration has been moved from client options map to
consul block under client configuration
* driver/docker: Enabled SSL by default for pulling images from docker
registries. [GH-1336]
IMPROVEMENTS:
* core: Scheduler reuses blocked evaluations to avoid unbounded creation of
evaluations under high contention [GH-1199]
* core: Scheduler stores placement failures in evaluations, no longer
generating failed allocations for debug information [GH-1188]
* api: Faster JSON response encoding [GH-1182]
* api: Gzip compress HTTP API requests [GH-1203]
* api: Plan api introduced for the Job endpoint [GH-1168]
* api: Job endpoint can enforce Job Modify Index to ensure job is being
modified from a known state [GH-1243]
* api/client: Add resource usage APIs for retrieving tasks/allocations/host
resource usage [GH-1189]
* cli: Faster when displaying large amounts of output [GH-1362]
* cli: Deprecate `eval-monitor` and introduce `eval-status` [GH-1206]
* cli: Unify the `fs` family of commands to be a single command [GH-1150]
* cli: Introduce `nomad plan` to dry-run a job through the scheduler and
determine its effects [GH-1181]
* cli: node-status command displays host resource usage and allocation
resources [GH-1261]
* cli: Region flag and environment variable introduced to set region
forwarding. Automatic region forwarding for run and plan [GH-1237]
* client: If Consul is available, automatically bootstrap Nomad Client
using the `_nomad` service in Consul. Nomad Servers now register
themselves with Consul to make this possible. [GH-1201]
* drivers: Qemu and Java can be run without an artifact being downloaded. Useful
if the artifact exists inside a chrooted directory [GH-1262]
* driver/docker: Added a client option to set SELinux labels for container
bind mounts. [GH-788]
* driver/docker: Enabled SSL by default for pulling images from docker
registries. [GH-1336]
* server: If Consul is available, automatically bootstrap Nomad Servers
using the `_nomad` service in Consul. [GH-1276]
BUG FIXES:
* core: Improve garbage collection of allocations and nodes [GH-1256]
* core: Fix a potential deadlock if establishing leadership fails and is
retried [GH-1231]
* core: Do not restart successful batch jobs when the node is removed/drained
[GH-1205]
* core: Fix an issue in which the scheduler could be invoked with insufficient
state [GH-1339]
* core: Updating User, Meta, or Resources in a task causes create/destroy updates
[GH-1128, GH-1153]
* core: Fix blocked evaluations being run without properly accounting for
priority [GH-1183]
* api: Tasks are no longer allowed to have slashes in their name [GH-1210]
* client: Delete tmp files used to communicate with executor [GH-1241]
* client: Prevent the client from restoring with incorrect task state [GH-1294]
* discovery: Ensure service and check names are unique [GH-1143, GH-1144]
* driver/docker: Ensure docker client doesn't time out after a minute.
[GH-1184]
* driver/java: Fix issue in which Java on darwin attempted to chroot [GH-1262]
* driver/docker: Fix issue in which logs could be spliced [GH-1322]
## 0.3.2 (April 22, 2016)
IMPROVEMENTS:
* core: Garbage collection partitioned to avoid system delays [GH-1012]
* core: Allow count zero task groups to enable blue/green deploys [GH-931]
* core: Validate driver configurations when submitting jobs [GH-1062, GH-1089]
* core: Job Deregister forces an evaluation for the job even if it doesn't
exist [GH-981]
* core: Rename successfully finished allocations to "Complete" rather than
"Dead" for clarity [GH-975]
* cli: `alloc-status` explains restart decisions [GH-984]
* cli: `node-drain -self` drains the local node [GH-1068]
* cli: `node-status -self` queries the local node [GH-1004]
* cli: Destructive commands now require confirmation [GH-983]
* cli: `alloc-status` display is less verbose by default [GH-946]
* cli: `server-members` displays the current leader in each region [GH-935]
* cli: `run` has an `-output` flag to emit a JSON version of the job [GH-990]
* cli: New `inspect` command to display a submitted job's specification
[GH-952]
* cli: `node-status` display is less verbose by default and shows a node's
total resources [GH-946]
* client: `artifact` source can be interpreted [GH-1070]
* client: Add IP and Port environment variables [GH-1099]
* client: Nomad fingerprinter to detect client's version [GH-965]
* client: Tasks can interpret Meta set in the task group and job [GH-985]
* client: All tasks in a task group are killed when a task fails [GH-962]
* client: Pass environment variables from host to exec based tasks [GH-970]
* client: Allow tasks to be run as a particular user [GH-950, GH-978]
* client: `artifact` block now supports downloading paths relative to the
task's directory [GH-944]
* docker: Timeout communications with Docker Daemon to avoid deadlocks with
misbehaving Docker Daemon [GH-1117]
* discovery: Support script based health checks [GH-986]
* discovery: Allowing registration of services which don't expose ports
[GH-1092]
* driver/docker: Support for `tty` and `interactive` options [GH-1059]
* jobspec: Improved validation of services referencing port labels [GH-1097]
* periodic: Periodic jobs are always evaluated in UTC timezone [GH-1074]
BUG FIXES:
* core: Prevent garbage collection of running batch jobs [GH-989]
* core: Trigger System scheduler when Node drain is disabled [GH-1106]
* core: Fix issue where in-place updated allocation double counted resources
[GH-957]
* core: Prevent drained, batched allocations from being migrated indefinitely
[GH-1086]
* client: Garbage collect Docker containers on exit [GH-1071]
* client: Fix common exec failures on CentOS and Amazon Linux [GH-1009]
* client: Fix S3 artifact downloading with IAM credentials [GH-1113]
* client: Fix handling of environment variables containing multiple equal
signs [GH-1115]
## 0.3.1 (March 16, 2016)
__BACKWARDS INCOMPATIBILITIES:__
* Service names that don't conform to RFC-1123 and RFC-2782 will fail
validation. To fix, change service name to conform to the RFCs before
running the job [GH-915]
* Jobs that downloaded artifacts will have to be updated to the new syntax and
be resubmitted. The new syntax consolidates artifacts to the `task` rather
than being duplicated inside each driver config [GH-921]
IMPROVEMENTS:
* cli: Validate job file schemas [GH-900]
* client: Add environment variables for task name, allocation ID/Name/Index
[GH-869, GH-896]
* client: Starting task is retried under the restart policy if the error is
recoverable [GH-859]
* client: Allow tasks to download artifacts, which can be archives, prior to
starting [GH-921]
* config: Validate Nomad configuration files [GH-910]
* config: Client config allows reserving resources [GH-910]
* driver/docker: Support for ECR [GH-858]
* driver/docker: Periodic Fingerprinting [GH-893]
* driver/docker: Preventing port reservation for log collection on Unix platforms [GH-897]
* driver/rkt: Pass DNS information to rkt driver [GH-892]
* jobspec: Require RFC-1123 and RFC-2782 valid service names [GH-915]
BUG FIXES:
* core: No longer cancel evaluations that are delayed in the plan queue
[GH-884]
* api: Guard client/fs/ APIs from being accessed on a non-client node [GH-890]
* client: Allow dashes in variable names during interpretation [GH-857]
* client: Updating kill timeout adheres to operator specified maximum value [GH-878]
* client: Fix a case in which clients would pull but not run allocations
[GH-906]
* consul: Remove concurrent map access [GH-874]
* driver/exec: Stopping tasks with more than one pid in a cgroup [GH-855]
* executor/linux: Add /run/resolvconf/ to chroot so DNS works [GH-905]
## 0.3.0 (February 25, 2016)
__BACKWARDS INCOMPATIBILITIES:__
* Stdout and Stderr log files of tasks have moved from task/local to
alloc/logs [GH-851]
* Any users of the runtime environment variable `$NOMAD_PORT_` will need to
update to the new `${NOMAD_ADDR_}` variable [GH-704]
* Service names that include periods will fail validation. To fix, remove any
periods from the service name before running the job [GH-770]
* Task resources are now validated and enforce minimum resources. If a job
specifies resources below the minimum they will need to be updated [GH-739]
* Node ID is no longer specifiable. For users who have set a custom Node
ID, the node should be drained before Nomad is updated and the data_dir
should be deleted before starting for the first time [GH-675]
* Users of custom restart policies should update to the new syntax which adds
a `mode` field. The `mode` can be either `fail` or `delay`. The default for
`batch` and `service` jobs is `fail` and `delay` respectively [GH-594]
* All jobs that interpret variables in constraints or driver configurations
will need to be updated to the new syntax which wraps the interpreted
variable in curly braces. (`$node.class` becomes `${node.class}`) [GH-760]
IMPROVEMENTS:
* core: Populate job status [GH-663]
* core: Cgroup fingerprinter [GH-712]
* core: Node class constraint [GH-618]
* core: User specifiable kill timeout [GH-624]
* core: Job queueing via blocked evaluations [GH-726]
* core: Only reschedule failed batch allocations [GH-746]
* core: Add available nodes by DC to AllocMetrics [GH-619]
* core: Improve scheduler retry logic under contention [GH-787]
* core: Computed node class and stack optimization [GH-691, GH-708]
* core: Improved restart policy with more user configuration [GH-594]
* core: Periodic specification for jobs [GH-540, GH-657, GH-659, GH-668]
* core: Batch jobs are garbage collected from the Nomad Servers [GH-586]
* core: Free half the CPUs on leader node for use in plan queue and evaluation
broker [GH-812]
* core: Seed random number generator used to randomize node traversal order
during scheduling [GH-808]
* core: Performance improvements [GH-823, GH-825, GH-827, GH-830, GH-832,
GH-833, GH-834, GH-839]
* core/api: System garbage collection endpoint [GH-828]
* core/api: Allow users to set arbitrary headers via agent config [GH-699]
* core/cli: Prefix based lookups of allocs/nodes/evals/jobs [GH-575]
* core/cli: Print short identifiers and UX cleanup [GH-675, GH-693, GH-692]
* core/client: Client pulls minimum set of required allocations [GH-731]
* cli: Output of agent-info is sorted [GH-617]
* cli: Eval monitor detects zero wait condition [GH-776]
* cli: Ability to navigate allocation directories [GH-709, GH-798]
* client: Batch allocation updates to the server [GH-835]
* client: Log rotation for all drivers [GH-685, GH-763, GH-819]
* client: Only download artifacts from http, https, and S3 [GH-841]
* client: Create a tmp/ directory inside each task directory [GH-757]
* client: Store when an allocation was received by the client [GH-821]
* client: Heartbeating and saving state resilient under high load [GH-811]
* client: Handle updates to tasks Restart Policy and KillTimeout [GH-751]
* client: Killing a driver handle is retried with an exponential backoff
[GH-809]
* client: Send Node to server when periodic fingerprinters change Node
attributes/metadata [GH-749]
* client/api: File-system access to allocation directories [GH-669]
* drivers: Validate the "command" field contains a single value [GH-842]
* drivers: Interpret Nomad variables in environment variables/args [GH-653]
* driver/rkt: Add support for CPU/Memory isolation [GH-610]
* driver/rkt: Add support for mounting alloc/task directory [GH-645]
* driver/docker: Support for .dockercfg based auth for private registries
[GH-773]
BUG FIXES:
* core: Node drain could only be partially applied [GH-750]
* core: Fix panic when eval Ack occurs at delivery limit [GH-790]
* cli: Handle parsing of un-named ports [GH-604]
* cli: Enforce absolute paths for data directories [GH-622]
* client: Cleanup of the allocation directory [GH-755]
* client: Improved stability under high contention [GH-789]
* client: Handle non-200 codes when parsing AWS metadata [GH-614]
* client: Unmount shared alloc dir when client is rebooted [GH-755]
* client/consul: Service name changes handled properly [GH-766]
* driver/rkt: handle broader format of rkt version outputs [GH-745]
* driver/qemu: failed to load image and kvm accelerator fixes [GH-656]
## 0.2.3 (December 17, 2015)
BUG FIXES:
* core: Task States not being properly updated [GH-600]
* client: Fixes for user lookup to support CoreOS [GH-591]
* discovery: Using a random prefix for nomad managed services [GH-579]
* discovery: De-Registering Tasks while Nomad sleeps before failed tasks are
restarted.
* discovery: Fixes for service registration when multiple allocations are bin
packed on a node [GH-583]
* configuration: Sort configuration files [GH-588]
* cli: RetryInterval was not being applied properly [GH-601]
## 0.2.2 (December 11, 2015)
IMPROVEMENTS:
* core: Enable `raw_exec` driver in dev mode [GH-558]
* cli: Server join/retry-join command line and config options [GH-527]
* cli: Nomad reports which config files are loaded at start time, or if none
are loaded [GH-536], [GH-553]
BUG FIXES:
* core: Send syslog to `LOCAL0` by default as previously documented [GH-547]
* client: remove all calls to default logger [GH-570]
* consul: Nomad is less noisy when Consul is not running [GH-567]
* consul: Nomad only deregisters services that it created [GH-568]
* driver/exec: Shutting down a task now sends the interrupt signal first to the
process before forcefully killing it. [GH-543]
* driver/docker: Docker driver no longer leaks unix domain socket connections
[GH-556]
* fingerprint/network: Now correctly detects interfaces on Windows [GH-382]
## 0.2.1 (November 28, 2015)
IMPROVEMENTS:
* core: Can specify a whitelist for activating drivers [GH-467]
* core: Can specify a whitelist for activating fingerprinters [GH-488]
* core/api: Can list all known regions in the cluster [GH-495]
* client/spawn: spawn package tests made portable (work on Windows) [GH-442]
* client/executor: executor package tests made portable (work on Windows) [GH-497]
* client/driver: driver package tests made portable (work on windows) [GH-502]
* client/discovery: Added more consul client api configuration options [GH-503]
* driver/docker: Added TLS client options to the config file [GH-480]
* jobspec: More flexibility in naming Services [GH-509]
BUG FIXES:
* core: Shared reference to DynamicPorts caused port conflicts when scheduling
count > 1 [GH-494]
* client/restart policy: Not restarting Batch Jobs if the exit code is 0 [GH-491]
* client/service discovery: Make Service IDs unique [GH-479]
* client/service: Fixes update to check definitions and services which are already registered [GH-498]
* driver/docker: Expose the container port instead of the host port [GH-466]
* driver/docker: Support `port_map` for static ports [GH-476]
* driver/docker: Pass 0.2.0-style port environment variables to the docker container [GH-476]
* jobspec: distinct_hosts constraint can be specified as a boolean (previously panicked) [GH-501]
## 0.2.0 (November 18, 2015)
__BACKWARDS INCOMPATIBILITIES:__
* core: HTTP API `/v1/node/<id>/allocations` returns full Allocation and not
stub [GH-402]
* core: Removed weight and hard/soft fields in constraints [GH-351]
* drivers: Qemu and Java driver configurations have been updated to both use
`artifact_source` as the source for external images/jars to be run
* jobspec: New reserved and dynamic port specification [GH-415]
* jobspec/drivers: Driver configuration supports arbitrary struct to be
passed in jobspec [GH-415]
FEATURES:
* core: Blocking queries supported in API [GH-366]
* core: System Scheduler that runs tasks on every node [GH-287]
* core: Regexp, version and lexical ordering constraints [GH-271]
* core: distinctHost constraint ensures Task Groups are running on distinct
clients [GH-321]
* core: Service block definition with Consul registration [GH-463, GH-460,
GH-458, GH-455, GH-446, GH-425]
* client: GCE Fingerprinting [GH-215]
* client: Restart policy for task groups enforced by the client [GH-369,
GH-393]
* driver/rawexec: Raw Fork/Exec Driver [GH-237]
* driver/rkt: Experimental Rkt Driver [GH-165, GH-247]
* drivers: Add support for downloading external artifacts to execute for
Exec, Raw exec drivers [GH-381]
IMPROVEMENTS:
* core: Configurable Node GC threshold [GH-362]
* core: Overlap plan verification and plan application for increased
throughput [GH-272]
* cli: Output of `alloc-status` also displays task state [GH-424]
* cli: Output of `server-members` is sorted [GH-323]
* cli: Show node attributes in `node-status` [GH-313]
* client/fingerprint: Network fingerprinter detects interface suitable for
use, rather than defaulting to eth0 [GH-334, GH-356]
* client: Client Restore State properly reattaches to tasks and recreates
them as needed [GH-364, GH-380, GH-388, GH-392, GH-394, GH-397, GH-408]
* client: Periodic Fingerprinting [GH-391]
* client: Precise snapshotting of TaskRunner and AllocRunner [GH-403, GH-411]
* client: Task State is tracked by client [GH-416]
* client: Test Skip Detection [GH-221]
* driver/docker: Can now specify auth for docker pull [GH-390]
* driver/docker: Can now specify DNS and DNSSearch options [GH-390]
* driver/docker: Can now specify the container's hostname [GH-426]
* driver/docker: Containers now have names based on the task name. [GH-389]
* driver/docker: Mount task local and alloc directory to docker containers [GH-290]
* driver/docker: Now accepts any value for `network_mode` to support userspace networking plugins in docker 1.9
* driver/java: Pass JVM options in java driver [GH-293, GH-297]
* drivers: Use BlkioWeight rather than BlkioThrottleReadIopsDevice [GH-222]
* jobspec and drivers: Driver configuration supports arbitrary struct to be passed in jobspec [GH-415]
BUG FIXES:
* core: Nomad Client/Server RPC codec encodes strings properly [GH-420]
* core: Reset Nack timer in response to scheduler operations [GH-325]
* core: Scheduler checks for updates to environment variables [GH-327]
* cli: Fix crash when -config was given a directory or empty path [GH-119]
* client/fingerprint: Use correct local interface on OS X [GH-361, GH-365]
* client: Nomad Client doesn't restart failed containers [GH-198]
* client: Reap spawn-daemon process, avoiding a zombie process [GH-240]
* client: Resource exhausted errors because of link-speed zero [GH-146,
GH-205]
* client: Restarting Nomad Client leads to orphaned containers [GH-159]
* driver/docker: Apply SELinux label for mounting directories in docker
[GH-377]
* driver/docker: Docker driver exposes ports when creating container [GH-212,
GH-412]
* driver/docker: Docker driver uses docker environment variables correctly
[GH-407]
* driver/qemu: Qemu fingerprint and tests work on both windows/linux [GH-352]
## 0.1.2 (October 6, 2015)
IMPROVEMENTS:
* client: Nomad client cleans allocations on exit when in dev mode [GH-214]
* drivers: Use go-getter for artifact retrieval, add artifact support to
Exec, Raw Exec drivers [GH-288]
## 0.1.1 (October 5, 2015)
IMPROVEMENTS:
* cli: Nomad Client configurable from command-line [GH-191]
* client/fingerprint: Native IP detection and user specifiable network
interface for fingerprinting [GH-189]
* driver/docker: Docker networking mode is configurable [GH-184]
* drivers: Set task environment variables [GH-206]
BUG FIXES:
* client/fingerprint: Network fingerprinting failed if default network
interface did not exist [GH-189]
* client: Fixed issue where network resources throughput would be set to 0
MBits if the link speed could not be determined [GH-205]
* client: Improved detection of Nomad binary [GH-181]
* driver/docker: Docker dynamic port mappings were not being set properly
[GH-199]
## 0.1.0 (September 28, 2015)
* Initial release


@ -1,85 +0,0 @@
PACKAGES = $(shell go list ./... | grep -v '/vendor/')
VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods \
-nilfunc -printf -rangeloops -shift -structtags -unsafeptr
EXTERNAL_TOOLS=\
github.com/kardianos/govendor \
github.com/mitchellh/gox \
golang.org/x/tools/cmd/cover \
github.com/axw/gocov/gocov \
gopkg.in/matm/v1/gocov-html \
github.com/ugorji/go/codec/codecgen
GOFILES_NOVENDOR = $(shell find . -type f -name '*.go' -not -path "./vendor/*")
all: test
dev: format generate
@NOMAD_DEV=1 sh -c "'$(PWD)/scripts/build.sh'"
bin: generate
@sh -c "'$(PWD)/scripts/build.sh'"
release:
@$(MAKE) bin
cov:
gocov test ./... | gocov-html > /tmp/coverage.html
open /tmp/coverage.html
test: generate
@echo "--> Running go fmt" ;
@if [ -n "`go fmt ${PACKAGES}`" ]; then \
echo "[ERR] go fmt updated formatting. Please commit formatted code first."; \
exit 1; \
fi
@sh -c "'$(PWD)/scripts/test.sh'"
@$(MAKE) vet
cover:
go list ./... | xargs -n1 go test --cover
format:
@echo "--> Running go fmt"
@go fmt $(PACKAGES)
generate:
@echo "--> Running go generate"
@go generate $(PACKAGES)
@sed -e 's|github.com/hashicorp/nomad/vendor/github.com/ugorji/go/codec|github.com/ugorji/go/codec|' nomad/structs/structs.generated.go >> structs.gen.tmp
@mv structs.gen.tmp nomad/structs/structs.generated.go
vet:
@go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \
go get golang.org/x/tools/cmd/vet; \
fi
@echo "--> Running go tool vet $(VETARGS) ${GOFILES_NOVENDOR}"
@go tool vet $(VETARGS) ${GOFILES_NOVENDOR} ; if [ $$? -eq 1 ]; then \
echo ""; \
echo "[LINT] Vet found suspicious constructs. Please check the reported constructs"; \
echo "and fix them if necessary before submitting the code for review."; \
fi
@git grep -n `echo "log"".Print"` | grep -v 'vendor/' ; if [ $$? -eq 0 ]; then \
echo "[LINT] Found "log"".Printf" calls. These should use Nomad's logger instead."; \
fi
web:
./scripts/website_run.sh
web-push:
./scripts/website_push.sh
# bootstrap the build by downloading additional tools
bootstrap:
@for tool in $(EXTERNAL_TOOLS) ; do \
echo "Installing $$tool" ; \
go get $$tool; \
done
install: bin/nomad
install -o root -g wheel -m 0755 ./bin/nomad /usr/local/bin/nomad
travis:
@sh -c "'$(PWD)/scripts/travis.sh'"
.PHONY: all bin cov integ test vet web web-push test-nodep


@ -1,18 +0,0 @@
If you have a question, prepend your issue with `[question]` or preferably use the [nomad mailing list](https://www.nomadproject.io/community.html).
If filing a bug please include the following:
### Nomad version
Output from `nomad version`
### Operating system and Environment details
### Issue
### Reproduction steps
### Nomad Server logs (if appropriate)
### Nomad Client logs (if appropriate)
### Job file (if appropriate)


@ -1,117 +0,0 @@
Nomad [![Build Status](https://travis-ci.org/hashicorp/nomad.svg)](https://travis-ci.org/hashicorp/nomad)
=========
- Website: https://www.nomadproject.io
- IRC: `#nomad-tool` on Freenode
- Mailing list: [Google Groups](https://groups.google.com/group/nomad-tool)
![Nomad](https://raw.githubusercontent.com/hashicorp/nomad/master/website/source/assets/images/logo-header%402x.png?token=AAkIoLO_y1g3wgHMr3QO-559BN22rN0kks5V_2HpwA%3D%3D)
Nomad is a cluster manager, designed for both long lived services and short
lived batch processing workloads. Developers use a declarative job specification
to submit work, and Nomad ensures constraints are satisfied and resource utilization
is optimized by efficient task packing. Nomad supports all major operating systems
and virtualized, containerized, or standalone applications.
The key features of Nomad are:
* **Docker Support**: Jobs can specify tasks which are Docker containers.
Nomad will automatically run the containers on clients which have Docker
installed, scale up and down based on the number of instances requested,
and automatically recover from failures.
* **Multi-Datacenter and Multi-Region Aware**: Nomad is designed to be
a global-scale scheduler. Multiple datacenters can be managed as part
of a larger region, and jobs can be scheduled across datacenters if
requested. Multiple regions join together and federate jobs making it
easy to run jobs anywhere.
* **Operationally Simple**: Nomad runs as a single binary that can be
either a client or server, and is completely self contained. Nomad does
not require any external services for storage or coordination. This means
Nomad combines the features of a resource manager and scheduler in a single
system.
* **Distributed and Highly-Available**: Nomad servers cluster together and
perform leader election and state replication to provide high availability
in the face of failure. The Nomad scheduling engine is optimized for
optimistic concurrency allowing all servers to make scheduling decisions to
maximize throughput.
* **HashiCorp Ecosystem**: Nomad integrates with the entire HashiCorp
ecosystem of tools. Along with all HashiCorp tools, Nomad is designed
in the unix philosophy of doing something specific and doing it well.
Nomad integrates with tools like Packer, Consul, and Terraform to support
building artifacts, service discovery, monitoring and capacity management.
For more information, see the [introduction section](https://www.nomadproject.io/intro)
of the Nomad website.
Getting Started & Documentation
-------------------------------
All documentation is available on the [Nomad website](https://www.nomadproject.io).
Developing Nomad
--------------------
If you wish to work on Nomad itself or any of its built-in systems,
you will first need [Go](https://www.golang.org) installed on your
machine (version 1.5+ is *required*).
**Developing with Vagrant**
There is an included Vagrantfile that can help bootstrap the process. The
created virtual machine is based on Ubuntu 14.04, and installs several of the
base libraries that can be used by Nomad.
To use this virtual machine, check out Nomad and run `vagrant up` from the root
of the repository:
```sh
$ git clone https://github.com/hashicorp/nomad.git
$ cd nomad
$ vagrant up
```
The virtual machine will launch, and a provisioning script will install the
needed dependencies.
**Developing locally**
For local dev, first make sure Go is properly installed, including setting up a
[GOPATH](https://golang.org/doc/code.html#GOPATH). After setting up Go, clone this
repository into `$GOPATH/src/github.com/hashicorp/nomad`. Then you can
download the required build tools such as vet, cover, godep, etc. by bootstrapping
your environment.
```sh
$ make bootstrap
...
```
Afterwards type `make test`. This will run the tests. If this exits with exit status 0,
then everything is working!
```sh
$ make test
...
```
To compile a development version of Nomad, run `make dev`. This will put the
Nomad binary in the `bin` and `$GOPATH/bin` folders:
```sh
$ make dev
...
$ bin/nomad
...
```
To cross-compile Nomad, run `make bin`. This will compile Nomad for multiple
platforms and place the resulting binaries into the `./pkg` directory:
```sh
$ make bin
...
$ ls ./pkg
...
```


@ -1,137 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"
DEFAULT_CPU_COUNT = 2
$script = <<SCRIPT
GO_VERSION="1.7"
CONSUL_VERSION="0.6.4"
# Install Prereq Packages
sudo apt-get update
sudo apt-get install -y build-essential curl git-core mercurial bzr libpcre3-dev pkg-config zip default-jre qemu libc6-dev-i386 silversearcher-ag jq htop vim unzip
# Setup go, for development of Nomad
SRCROOT="/opt/go"
SRCPATH="/opt/gopath"
# Get the ARCH
ARCH=`uname -m | sed 's|i686|386|' | sed 's|x86_64|amd64|'`
# Install Go
cd /tmp
wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${ARCH}.tar.gz
tar -xf go${GO_VERSION}.linux-${ARCH}.tar.gz
sudo mv go $SRCROOT
sudo chmod 775 $SRCROOT
sudo chown vagrant:vagrant $SRCROOT
# Setup the GOPATH; even though the shared folder spec gives the working
# directory the right user/group, we need to set it properly on the
# parent path to allow subsequent "go get" commands to work.
sudo mkdir -p $SRCPATH
sudo chown -R vagrant:vagrant $SRCPATH 2>/dev/null || true
# ^^ silencing errors here because we expect this to fail for the shared folder
cat <<EOF >/tmp/gopath.sh
export GOPATH="$SRCPATH"
export GOROOT="$SRCROOT"
export PATH="$SRCROOT/bin:$SRCPATH/bin:\$PATH"
EOF
sudo mv /tmp/gopath.sh /etc/profile.d/gopath.sh
sudo chmod 0755 /etc/profile.d/gopath.sh
source /etc/profile.d/gopath.sh
echo Fetching Consul...
cd /tmp/
wget https://releases.hashicorp.com/consul/${CONSUL_VERSION}/consul_${CONSUL_VERSION}_linux_amd64.zip -O consul.zip
echo Installing Consul...
unzip consul.zip
sudo chmod +x consul
sudo mv consul /usr/bin/consul
# Install Docker
echo deb https://apt.dockerproject.org/repo ubuntu-`lsb_release -c | awk '{print $2}'` main | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D
sudo apt-get update
sudo apt-get install -y docker-engine
# Restart docker to make sure we get the latest version of the daemon if there is an upgrade
sudo service docker restart
# Make sure we can actually use docker as the vagrant user
sudo usermod -aG docker vagrant
# Setup Nomad for development
cd /opt/gopath/src/github.com/hashicorp/nomad && make bootstrap
# Install rkt
bash scripts/install_rkt.sh
# CD into the nomad working directory when we login to the VM
grep "cd /opt/gopath/src/github.com/hashicorp/nomad" ~/.profile || echo "cd /opt/gopath/src/github.com/hashicorp/nomad" >> ~/.profile
SCRIPT
def configureVM(vmCfg, vmParams={
numCPUs: DEFAULT_CPU_COUNT,
}
)
vmCfg.vm.box = "cbednarski/ubuntu-1404"
vmCfg.vm.provision "shell", inline: $script, privileged: false
vmCfg.vm.synced_folder '.', '/opt/gopath/src/github.com/hashicorp/nomad'
# We're going to compile go and run a concurrent system, so give ourselves
# some extra resources. Nomad will have trouble working correctly with <2
# CPUs so we should use at least that many.
cpus = vmParams.fetch(:numCPUs, DEFAULT_CPU_COUNT)
memory = 2048
vmCfg.vm.provider "parallels" do |p, o|
o.vm.box = "parallels/ubuntu-14.04"
p.memory = memory
p.cpus = cpus
end
vmCfg.vm.provider "virtualbox" do |v|
v.memory = memory
v.cpus = cpus
end
["vmware_fusion", "vmware_workstation"].each do |p|
vmCfg.vm.provider p do |v|
v.gui = false
v.memory = memory
v.cpus = cpus
end
end
return vmCfg
end
Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
1.upto(3) do |n|
vmName = "nomad-server%02d" % [n]
isFirstBox = (n == 1)
numCPUs = DEFAULT_CPU_COUNT
if isFirstBox and Object::RUBY_PLATFORM =~ /darwin/i
# Override the max CPUs for the first VM
numCPUs = [numCPUs, (`/usr/sbin/sysctl -n hw.ncpu`.to_i - 1)].max
end
config.vm.define vmName, autostart: isFirstBox, primary: isFirstBox do |vmCfg|
vmCfg.vm.hostname = vmName
vmCfg = configureVM(vmCfg, {:numCPUs => numCPUs})
end
end
1.upto(3) do |n|
vmName = "nomad-client%02d" % [n]
config.vm.define vmName, autostart: false, primary: false do |vmCfg|
vmCfg.vm.hostname = vmName
vmCfg = configureVM(vmCfg)
end
end
end


@ -16,6 +16,19 @@ type Agent struct {
region string
}
// KeyringResponse is a unified key response and can be used for install,
// remove, use, as well as listing key queries.
type KeyringResponse struct {
Messages map[string]string
Keys map[string]int
NumNodes int
}
// KeyringRequest is the request object for serf key operations.
type KeyringRequest struct {
Key string
}
// Agent returns a new agent which can be used to query
// the agent-specific endpoints.
func (c *Client) Agent() *Agent {
@ -118,8 +131,8 @@ func (a *Agent) Join(addrs ...string) (int, error) {
}
// Members is used to query all of the known server members
func (a *Agent) Members() ([]*AgentMember, error) {
var resp []*AgentMember
func (a *Agent) Members() (*ServerMembers, error) {
var resp *ServerMembers
// Query the known members
_, err := a.client.query("/v1/agent/members", &resp, nil)
@ -157,6 +170,46 @@ func (a *Agent) SetServers(addrs []string) error {
return err
}
// ListKeys returns the list of installed keys
func (a *Agent) ListKeys() (*KeyringResponse, error) {
var resp KeyringResponse
_, err := a.client.query("/v1/agent/keyring/list", &resp, nil)
if err != nil {
return nil, err
}
return &resp, nil
}
// InstallKey installs a key in the keyrings of all the serf members
func (a *Agent) InstallKey(key string) (*KeyringResponse, error) {
args := KeyringRequest{
Key: key,
}
var resp KeyringResponse
_, err := a.client.write("/v1/agent/keyring/install", &args, &resp, nil)
return &resp, err
}
// UseKey uses a key from the keyring of serf members
func (a *Agent) UseKey(key string) (*KeyringResponse, error) {
args := KeyringRequest{
Key: key,
}
var resp KeyringResponse
_, err := a.client.write("/v1/agent/keyring/use", &args, &resp, nil)
return &resp, err
}
// RemoveKey removes a particular key from keyrings of serf members
func (a *Agent) RemoveKey(key string) (*KeyringResponse, error) {
args := KeyringRequest{
Key: key,
}
var resp KeyringResponse
_, err := a.client.write("/v1/agent/keyring/remove", &args, &resp, nil)
return &resp, err
}
// joinResponse is used to decode the response we get while
// sending a member join request.
type joinResponse struct {
@ -164,6 +217,13 @@ type joinResponse struct {
Error string `json:"error"`
}
type ServerMembers struct {
ServerName string
Region string
DC string
Members []*AgentMember
}
// AgentMember represents a cluster member known to the agent
type AgentMember struct {
Name string
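
The keyring methods above wrap the new `/v1/agent/keyring/*` HTTP endpoints. Below is a minimal sketch of driving them from the `api` package, using only the types and signatures shown in this diff; the key value is an illustrative placeholder, not a real encryption key.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	// Build a client from the package defaults (http://127.0.0.1:4646 unless
	// NOMAD_ADDR or the TLS environment variables override it).
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}
	agent := client.Agent()

	// Install a gossip key on every serf member, then list what each holds.
	// The key below is a placeholder, not a real key.
	if _, err := agent.InstallKey("H1dfkSZOVnP/JUnaBfTzXg=="); err != nil {
		log.Fatal(err)
	}

	keys, err := agent.ListKeys()
	if err != nil {
		log.Fatal(err)
	}
	for key, count := range keys.Keys {
		fmt.Printf("%s held by %d of %d nodes\n", key, count, keys.NumNodes)
	}
}
```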


@ -4,8 +4,12 @@ import (
"fmt"
"sort"
"time"
"github.com/hashicorp/go-cleanhttp"
)
var (
// NodeDownErr marks an operation as not able to complete since the node is
// down.
NodeDownErr = fmt.Errorf("node down")
)
// Allocations is used to query the alloc-related endpoints.
@ -48,13 +52,13 @@ func (a *Allocations) Stats(alloc *Allocation, q *QueryOptions) (*AllocResourceU
if err != nil {
return nil, err
}
if node.Status == "down" {
return nil, NodeDownErr
}
if node.HTTPAddr == "" {
return nil, fmt.Errorf("http addr of the node where alloc %q is running is not advertised", alloc.ID)
}
client, err := NewClient(&Config{
Address: fmt.Sprintf("http://%s", node.HTTPAddr),
HttpClient: cleanhttp.DefaultClient(),
})
client, err := NewClient(a.client.config.CopyConfig(node.HTTPAddr, node.TLSEnabled))
if err != nil {
return nil, err
}
@ -81,6 +85,7 @@ type Allocation struct {
ClientStatus string
ClientDescription string
TaskStates map[string]*TaskState
PreviousAllocation string
CreateIndex uint64
ModifyIndex uint64
CreateTime int64
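
Because `CopyConfig` now carries the node's advertised TLS setting, `Stats` dials the client node over whichever scheme it advertises, and it returns the new `NodeDownErr` sentinel instead of attempting a doomed request when the node is down. A small sketch of a caller handling that sentinel; the `Allocations()` accessor is assumed from the package's usual pattern and does not appear in this hunk.

```go
package example

import (
	"fmt"

	"github.com/hashicorp/nomad/api"
)

// printAllocStats fetches live resource usage for one allocation, treating a
// down node as a non-fatal condition via the new NodeDownErr sentinel.
// client.Allocations() is assumed here; only Stats appears in this diff.
func printAllocStats(client *api.Client, alloc *api.Allocation) error {
	usage, err := client.Allocations().Stats(alloc, nil)
	if err == api.NodeDownErr {
		fmt.Printf("alloc %s: node is down, no stats available\n", alloc.ID)
		return nil
	}
	if err != nil {
		return err
	}
	fmt.Printf("alloc %s resource usage: %+v\n", alloc.ID, usage)
	return nil
}
```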


@ -3,6 +3,7 @@ package api
import (
"bytes"
"compress/gzip"
"crypto/tls"
"encoding/json"
"fmt"
"io"
@ -14,6 +15,7 @@ import (
"time"
"github.com/hashicorp/go-cleanhttp"
rootcerts "github.com/hashicorp/go-rootcerts"
)
// QueryOptions are used to parameterize a query
@ -102,6 +104,53 @@ type Config struct {
// WaitTime limits how long a Watch will block. If not provided,
// the agent default values will be used.
WaitTime time.Duration
// TLSConfig provides the various TLS related configurations for the http
// client
TLSConfig *TLSConfig
}
// CopyConfig copies the configuration with a new address
func (c *Config) CopyConfig(address string, tlsEnabled bool) *Config {
scheme := "http"
if tlsEnabled {
scheme = "https"
}
config := &Config{
Address: fmt.Sprintf("%s://%s", scheme, address),
Region: c.Region,
HttpClient: c.HttpClient,
HttpAuth: c.HttpAuth,
WaitTime: c.WaitTime,
TLSConfig: c.TLSConfig,
}
return config
}
// TLSConfig contains the parameters needed to configure TLS on the HTTP client
// used to communicate with Nomad.
type TLSConfig struct {
// CACert is the path to a PEM-encoded CA cert file to use to verify the
// Nomad server SSL certificate.
CACert string
// CAPath is the path to a directory of PEM-encoded CA cert files to verify
// the Nomad server SSL certificate.
CAPath string
// ClientCert is the path to the certificate for Nomad communication
ClientCert string
// ClientKey is the path to the private key for Nomad communication
ClientKey string
// TLSServerName, if set, is used to set the SNI host when connecting via
// TLS.
TLSServerName string
// Insecure enables or disables SSL verification
Insecure bool
}
// DefaultConfig returns a default configuration for the client
@ -109,7 +158,14 @@ func DefaultConfig() *Config {
config := &Config{
Address: "http://127.0.0.1:4646",
HttpClient: cleanhttp.DefaultClient(),
TLSConfig: &TLSConfig{},
}
transport := config.HttpClient.Transport.(*http.Transport)
transport.TLSHandshakeTimeout = 10 * time.Second
transport.TLSClientConfig = &tls.Config{
MinVersion: tls.VersionTLS12,
}
if addr := os.Getenv("NOMAD_ADDR"); addr != "" {
config.Address = addr
}
@ -128,9 +184,71 @@ func DefaultConfig() *Config {
Password: password,
}
}
// Read TLS specific env vars
if v := os.Getenv("NOMAD_CACERT"); v != "" {
config.TLSConfig.CACert = v
}
if v := os.Getenv("NOMAD_CAPATH"); v != "" {
config.TLSConfig.CAPath = v
}
if v := os.Getenv("NOMAD_CLIENT_CERT"); v != "" {
config.TLSConfig.ClientCert = v
}
if v := os.Getenv("NOMAD_CLIENT_KEY"); v != "" {
config.TLSConfig.ClientKey = v
}
if v := os.Getenv("NOMAD_SKIP_VERIFY"); v != "" {
if insecure, err := strconv.ParseBool(v); err == nil {
config.TLSConfig.Insecure = insecure
}
}
return config
}
// ConfigureTLS applies a set of TLS configurations to the HTTP client.
func (c *Config) ConfigureTLS() error {
if c.HttpClient == nil {
return fmt.Errorf("config HTTP Client must be set")
}
var clientCert tls.Certificate
foundClientCert := false
if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" {
if c.TLSConfig.ClientCert != "" && c.TLSConfig.ClientKey != "" {
var err error
clientCert, err = tls.LoadX509KeyPair(c.TLSConfig.ClientCert, c.TLSConfig.ClientKey)
if err != nil {
return err
}
foundClientCert = true
} else if c.TLSConfig.ClientCert != "" || c.TLSConfig.ClientKey != "" {
return fmt.Errorf("Both client cert and client key must be provided")
}
}
clientTLSConfig := c.HttpClient.Transport.(*http.Transport).TLSClientConfig
rootConfig := &rootcerts.Config{
CAFile: c.TLSConfig.CACert,
CAPath: c.TLSConfig.CAPath,
}
if err := rootcerts.ConfigureTLS(clientTLSConfig, rootConfig); err != nil {
return err
}
clientTLSConfig.InsecureSkipVerify = c.TLSConfig.Insecure
if foundClientCert {
clientTLSConfig.Certificates = []tls.Certificate{clientCert}
}
if c.TLSConfig.TLSServerName != "" {
clientTLSConfig.ServerName = c.TLSConfig.TLSServerName
}
return nil
}
// Client provides a client to the Nomad API
type Client struct {
config Config
@ -151,6 +269,11 @@ func NewClient(config *Config) (*Client, error) {
config.HttpClient = defConfig.HttpClient
}
// Configure the TLS configuration
if err := config.ConfigureTLS(); err != nil {
return nil, err
}
client := &Client{
config: *config,
}
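
Callers can opt into the new TLS support either through the `NOMAD_CACERT`, `NOMAD_CAPATH`, `NOMAD_CLIENT_CERT`, `NOMAD_CLIENT_KEY`, and `NOMAD_SKIP_VERIFY` environment variables read by `DefaultConfig`, or by filling in `TLSConfig` before constructing the client. A minimal sketch of the explicit form, with a placeholder address and certificate paths:

```go
package main

import (
	"log"

	"github.com/hashicorp/nomad/api"
)

func main() {
	cfg := api.DefaultConfig()
	// The address and certificate paths below are placeholders.
	cfg.Address = "https://nomad.example.com:4646"
	cfg.TLSConfig = &api.TLSConfig{
		CACert:     "/etc/nomad.d/ca.pem",
		ClientCert: "/etc/nomad.d/cli.pem",
		ClientKey:  "/etc/nomad.d/cli-key.pem",
	}

	// NewClient calls ConfigureTLS, so a bad cert/key pair or an unreadable
	// CA file is reported here rather than on the first request.
	client, err := api.NewClient(cfg)
	if err != nil {
		log.Fatal(err)
	}
	_ = client
}
```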


@ -52,17 +52,13 @@ func (c *Client) AllocFS() *AllocFS {
// getNodeClient returns a Client that will dial the node. If the QueryOptions
// is set, the function will ensure that it is initialized and that the Params
// field is valid.
func (a *AllocFS) getNodeClient(nodeHTTPAddr, allocID string, q **QueryOptions) (*Client, error) {
if nodeHTTPAddr == "" {
func (a *AllocFS) getNodeClient(node *Node, allocID string, q **QueryOptions) (*Client, error) {
if node.HTTPAddr == "" {
return nil, fmt.Errorf("http addr of the node where alloc %q is running is not advertised", allocID)
}
// Get an API client for the node
nodeClientConfig := &Config{
Address: fmt.Sprintf("http://%s", nodeHTTPAddr),
Region: a.client.config.Region,
}
nodeClient, err := NewClient(nodeClientConfig)
nodeClient, err := NewClient(a.client.config.CopyConfig(node.HTTPAddr, node.TLSEnabled))
if err != nil {
return nil, err
}
@ -87,7 +83,7 @@ func (a *AllocFS) List(alloc *Allocation, path string, q *QueryOptions) ([]*Allo
if err != nil {
return nil, nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, nil, err
}
@ -108,7 +104,7 @@ func (a *AllocFS) Stat(alloc *Allocation, path string, q *QueryOptions) (*AllocF
if err != nil {
return nil, nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, nil, err
}
@ -130,7 +126,7 @@ func (a *AllocFS) ReadAt(alloc *Allocation, path string, offset int64, limit int
return nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, err
}
@ -153,7 +149,7 @@ func (a *AllocFS) Cat(alloc *Allocation, path string, q *QueryOptions) (io.ReadC
return nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, err
}
@ -182,7 +178,7 @@ func (a *AllocFS) Stream(alloc *Allocation, path, origin string, offset int64,
return nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, err
}
@ -251,7 +247,7 @@ func (a *AllocFS) Logs(alloc *Allocation, follow bool, task, logType, origin str
return nil, err
}
nodeClient, err := a.getNodeClient(node.HTTPAddr, alloc.ID, &q)
nodeClient, err := a.getNodeClient(node, alloc.ID, &q)
if err != nil {
return nil, err
}


@ -191,6 +191,7 @@ type PeriodicConfig struct {
type Job struct {
Region string
ID string
ParentID string
Name string
Type string
Priority int
@ -201,6 +202,7 @@ type Job struct {
Update *UpdateStrategy
Periodic *PeriodicConfig
Meta map[string]string
VaultToken string
Status string
StatusDescription string
CreateIndex uint64


@ -4,8 +4,6 @@ import (
"fmt"
"sort"
"strconv"
"github.com/hashicorp/go-cleanhttp"
)
// Nodes is used to query node-related API endpoints
@ -82,10 +80,7 @@ func (n *Nodes) Stats(nodeID string, q *QueryOptions) (*HostStats, error) {
if node.HTTPAddr == "" {
return nil, fmt.Errorf("http addr of the node %q is running is not advertised", nodeID)
}
client, err := NewClient(&Config{
Address: fmt.Sprintf("http://%s", node.HTTPAddr),
HttpClient: cleanhttp.DefaultClient(),
})
client, err := NewClient(n.client.config.CopyConfig(node.HTTPAddr, node.TLSEnabled))
if err != nil {
return nil, err
}
@ -102,6 +97,7 @@ type Node struct {
Datacenter string
Name string
HTTPAddr string
TLSEnabled bool
Attributes map[string]string
Resources *Resources
Reserved *Resources


@ -1,5 +1,7 @@
package api
import "io"
// Raw can be used to do raw queries against custom endpoints
type Raw struct {
c *Client
@ -17,6 +19,12 @@ func (raw *Raw) Query(endpoint string, out interface{}, q *QueryOptions) (*Query
return raw.c.query(endpoint, out, q)
}
// Response is used to make a GET request against an endpoint and returns the
// response body
func (raw *Raw) Response(endpoint string, q *QueryOptions) (io.ReadCloser, error) {
return raw.c.rawQuery(endpoint, q)
}
// Write is used to do a PUT request against an endpoint
// and serialize/deserialized using the standard Nomad conventions.
func (raw *Raw) Write(endpoint string, in, out interface{}, q *WriteOptions) (*WriteMeta, error) {
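
`Response` complements `Query` by handing back the raw body for endpoints whose payload the caller wants to stream or decode itself. A small sketch; the `client.Raw()` accessor is assumed from the package's usual pattern and is not part of this diff.

```go
package example

import (
	"io"
	"os"

	"github.com/hashicorp/nomad/api"
)

// dumpMembers streams the raw JSON body of an endpoint to stdout instead of
// decoding it, using the new Raw.Response helper. client.Raw() is assumed
// from the package's usual accessor pattern; only Response appears here.
func dumpMembers(client *api.Client) error {
	body, err := client.Raw().Response("/v1/agent/members", nil)
	if err != nil {
		return err
	}
	defer body.Close()

	_, err = io.Copy(os.Stdout, body)
	return err
}
```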


@ -60,16 +60,17 @@ type RestartPolicy struct {
// The ServiceCheck data model represents the consul health check that
// Nomad registers for a Task
type ServiceCheck struct {
Id string
Name string
Type string
Command string
Args []string
Path string
Protocol string `mapstructure:"port"`
PortLabel string `mapstructure:"port"`
Interval time.Duration
Timeout time.Duration
Id string
Name string
Type string
Command string
Args []string
Path string
Protocol string `mapstructure:"port"`
PortLabel string `mapstructure:"port"`
Interval time.Duration
Timeout time.Duration
InitialStatus string `mapstructure:"initial_status"`
}
// The Service model represents a Consul service definition
@ -81,6 +82,13 @@ type Service struct {
Checks []ServiceCheck
}
// EphemeralDisk is an ephemeral disk object
type EphemeralDisk struct {
Sticky bool
Migrate bool
SizeMB int `mapstructure:"size"`
}
// TaskGroup is the unit of scheduling.
type TaskGroup struct {
Name string
@ -88,6 +96,7 @@ type TaskGroup struct {
Constraints []*Constraint
Tasks []*Task
RestartPolicy *RestartPolicy
EphemeralDisk *EphemeralDisk
Meta map[string]string
}
@ -120,6 +129,12 @@ func (g *TaskGroup) AddTask(t *Task) *TaskGroup {
return g
}
// RequireDisk adds a ephemeral disk to the task group
func (g *TaskGroup) RequireDisk(disk *EphemeralDisk) *TaskGroup {
g.EphemeralDisk = disk
return g
}
// LogConfig provides configuration for log rotation
type LogConfig struct {
MaxFiles int
@ -140,6 +155,8 @@ type Task struct {
KillTimeout time.Duration
LogConfig *LogConfig
Artifacts []*TaskArtifact
Vault *Vault
Templates []*Template
}
// TaskArtifact is used to download artifacts before running a task.
@ -149,6 +166,22 @@ type TaskArtifact struct {
RelativeDest string
}
type Template struct {
SourcePath string
DestPath string
EmbeddedTmpl string
ChangeMode string
ChangeSignal string
Splay time.Duration
}
type Vault struct {
Policies []string
Env bool
ChangeMode string
ChangeSignal string
}
// NewTask creates and initializes a new Task.
func NewTask(name, driver string) *Task {
return &Task{
@ -159,7 +192,7 @@ func NewTask(name, driver string) *Task {
// Configure is used to configure a single k/v pair on
// the task.
func (t *Task) SetConfig(key, val string) *Task {
func (t *Task) SetConfig(key string, val interface{}) *Task {
if t.Config == nil {
t.Config = make(map[string]interface{})
}
@ -198,10 +231,12 @@ func (t *Task) SetLogConfig(l *LogConfig) *Task {
// transitions.
type TaskState struct {
State string
Failed bool
Events []*TaskEvent
}
const (
TaskSetupFailure = "Setup Failure"
TaskDriverFailure = "Driver Failure"
TaskReceived = "Received"
TaskFailedValidation = "Failed Validation"
@ -213,21 +248,34 @@ const (
TaskNotRestarting = "Not Restarting"
TaskDownloadingArtifacts = "Downloading Artifacts"
TaskArtifactDownloadFailed = "Failed Artifact Download"
TaskVaultRenewalFailed = "Vault token renewal failed"
TaskSiblingFailed = "Sibling task failed"
TaskSignaling = "Signaling"
TaskRestartSignal = "Restart Signaled"
)
// TaskEvent is an event that effects the state of a task and contains meta-data
// appropriate to the events type.
type TaskEvent struct {
Type string
Time int64
RestartReason string
DriverError string
ExitCode int
Signal int
Message string
KillTimeout time.Duration
KillError string
StartDelay int64
DownloadError string
ValidationError string
Type string
Time int64
FailsTask bool
RestartReason string
SetupError string
DriverError string
ExitCode int
Signal int
Message string
KillReason string
KillTimeout time.Duration
KillError string
StartDelay int64
DownloadError string
ValidationError string
DiskLimit int64
DiskSize int64
FailedSibling string
VaultError string
TaskSignalReason string
TaskSignal string
}
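
Taken together, the new `EphemeralDisk`, `Template`, and `Vault` types plug into the existing builder-style helpers on `TaskGroup` and `Task`. A short sketch under those assumptions; the enclosing job is omitted and all names and values are illustrative.

```go
package example

import (
	"time"

	"github.com/hashicorp/nomad/api"
)

// exampleGroup sketches how the new EphemeralDisk, Template and Vault types
// compose with the existing task helpers shown in this diff.
func exampleGroup() *api.TaskGroup {
	group := &api.TaskGroup{Name: "cache"}

	// Ask for 500 MB of sticky ephemeral disk for the whole group.
	group.RequireDisk(&api.EphemeralDisk{
		Sticky: true,
		SizeMB: 500,
	})

	task := api.NewTask("redis", "docker")
	// SetConfig now takes arbitrary values rather than only strings.
	task.SetConfig("image", "redis:3.2")
	task.SetConfig("port_map", []map[string]int{{"db": 6379}})

	// Render a template into the task directory and request a Vault policy.
	task.Templates = []*api.Template{{
		EmbeddedTmpl: "PASSWORD={{ key \"service/redis/pass\" }}\n",
		DestPath:     "local/redis.env",
		ChangeMode:   "restart",
		Splay:        5 * time.Second,
	}}
	task.Vault = &api.Vault{Policies: []string{"redis"}}

	group.AddTask(task)
	return group
}
```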


@ -1,679 +0,0 @@
package agent
import (
"fmt"
"io"
"log"
"net"
"path/filepath"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/hashicorp/nomad/client"
clientconfig "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/command/agent/consul"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
clientHttpCheckInterval = 10 * time.Second
clientHttpCheckTimeout = 3 * time.Second
serverHttpCheckInterval = 10 * time.Second
serverHttpCheckTimeout = 3 * time.Second
serverRpcCheckInterval = 10 * time.Second
serverRpcCheckTimeout = 3 * time.Second
serverSerfCheckInterval = 10 * time.Second
serverSerfCheckTimeout = 3 * time.Second
)
// Agent is a long running daemon that is used to run both
// clients and servers. Servers are responsible for managing
// state and making scheduling decisions. Clients can be
// scheduled to, and are responsible for interfacing with
// servers to run allocations.
type Agent struct {
config *Config
logger *log.Logger
logOutput io.Writer
// consulSyncer registers the Nomad agent with the Consul Agent
consulSyncer *consul.Syncer
client *client.Client
clientHTTPAddr string
server *nomad.Server
serverHTTPAddr string
serverRPCAddr string
serverSerfAddr string
shutdown bool
shutdownCh chan struct{}
shutdownLock sync.Mutex
}
// NewAgent is used to create a new agent with the given configuration
func NewAgent(config *Config, logOutput io.Writer) (*Agent, error) {
a := &Agent{
config: config,
logger: log.New(logOutput, "", log.LstdFlags|log.Lmicroseconds),
logOutput: logOutput,
shutdownCh: make(chan struct{}),
}
if err := a.setupConsulSyncer(); err != nil {
return nil, fmt.Errorf("Failed to initialize Consul syncer task: %v", err)
}
if err := a.setupServer(); err != nil {
return nil, err
}
if err := a.setupClient(); err != nil {
return nil, err
}
if a.client == nil && a.server == nil {
return nil, fmt.Errorf("must have at least client or server mode enabled")
}
// The Nomad Agent runs the consul.Syncer regardless of whether or not the
// Agent is running in Client or Server mode (or both), and regardless of
// the consul.auto_advertise parameter. The Client and Server both reuse the
// same consul.Syncer instance. This Syncer task periodically executes
// callbacks that update Consul. The reason the Syncer is always running is
// because one of the callbacks attempts to self-bootstrap Nomad using
// information found in Consul.
go a.consulSyncer.Run()
return a, nil
}
// serverConfig is used to generate a new server configuration struct
// for initializing a nomad server.
func (a *Agent) serverConfig() (*nomad.Config, error) {
conf := a.config.NomadConfig
if conf == nil {
conf = nomad.DefaultConfig()
}
conf.LogOutput = a.logOutput
conf.DevMode = a.config.DevMode
conf.Build = fmt.Sprintf("%s%s", a.config.Version, a.config.VersionPrerelease)
if a.config.Region != "" {
conf.Region = a.config.Region
}
if a.config.Datacenter != "" {
conf.Datacenter = a.config.Datacenter
}
if a.config.NodeName != "" {
conf.NodeName = a.config.NodeName
}
if a.config.Server.BootstrapExpect > 0 {
if a.config.Server.BootstrapExpect == 1 {
conf.Bootstrap = true
} else {
atomic.StoreInt32(&conf.BootstrapExpect, int32(a.config.Server.BootstrapExpect))
}
}
if a.config.DataDir != "" {
conf.DataDir = filepath.Join(a.config.DataDir, "server")
}
if a.config.Server.DataDir != "" {
conf.DataDir = a.config.Server.DataDir
}
if a.config.Server.ProtocolVersion != 0 {
conf.ProtocolVersion = uint8(a.config.Server.ProtocolVersion)
}
if a.config.Server.NumSchedulers != 0 {
conf.NumSchedulers = a.config.Server.NumSchedulers
}
if len(a.config.Server.EnabledSchedulers) != 0 {
conf.EnabledSchedulers = a.config.Server.EnabledSchedulers
}
// Set up the advertise addrs
if addr := a.config.AdvertiseAddrs.Serf; addr != "" {
serfAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, fmt.Errorf("error resolving serf advertise address: %s", err)
}
conf.SerfConfig.MemberlistConfig.AdvertiseAddr = serfAddr.IP.String()
conf.SerfConfig.MemberlistConfig.AdvertisePort = serfAddr.Port
}
if addr := a.config.AdvertiseAddrs.RPC; addr != "" {
rpcAddr, err := net.ResolveTCPAddr("tcp", addr)
if err != nil {
return nil, fmt.Errorf("error resolving rpc advertise address: %s", err)
}
conf.RPCAdvertise = rpcAddr
}
// Set up the bind addresses
if addr := a.config.BindAddr; addr != "" {
conf.RPCAddr.IP = net.ParseIP(addr)
conf.SerfConfig.MemberlistConfig.BindAddr = addr
}
if addr := a.config.Addresses.RPC; addr != "" {
conf.RPCAddr.IP = net.ParseIP(addr)
}
if addr := a.config.Addresses.Serf; addr != "" {
conf.SerfConfig.MemberlistConfig.BindAddr = addr
}
// Set up the ports
if port := a.config.Ports.RPC; port != 0 {
conf.RPCAddr.Port = port
}
if port := a.config.Ports.Serf; port != 0 {
conf.SerfConfig.MemberlistConfig.BindPort = port
}
// Resolve the Server's HTTP Address
if a.config.AdvertiseAddrs.HTTP != "" {
a.serverHTTPAddr = a.config.AdvertiseAddrs.HTTP
} else if a.config.Addresses.HTTP != "" {
a.serverHTTPAddr = net.JoinHostPort(a.config.Addresses.HTTP, strconv.Itoa(a.config.Ports.HTTP))
} else if a.config.BindAddr != "" {
a.serverHTTPAddr = net.JoinHostPort(a.config.BindAddr, strconv.Itoa(a.config.Ports.HTTP))
} else {
a.serverHTTPAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(a.config.Ports.HTTP))
}
addr, err := net.ResolveTCPAddr("tcp", a.serverHTTPAddr)
if err != nil {
return nil, fmt.Errorf("error resolving HTTP addr %+q: %v", a.serverHTTPAddr, err)
}
a.serverHTTPAddr = net.JoinHostPort(addr.IP.String(), strconv.Itoa(addr.Port))
// Resolve the Server's RPC Address
if a.config.AdvertiseAddrs.RPC != "" {
a.serverRPCAddr = a.config.AdvertiseAddrs.RPC
} else if a.config.Addresses.RPC != "" {
a.serverRPCAddr = net.JoinHostPort(a.config.Addresses.RPC, strconv.Itoa(a.config.Ports.RPC))
} else if a.config.BindAddr != "" {
a.serverRPCAddr = net.JoinHostPort(a.config.BindAddr, strconv.Itoa(a.config.Ports.RPC))
} else {
a.serverRPCAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(a.config.Ports.RPC))
}
addr, err = net.ResolveTCPAddr("tcp", a.serverRPCAddr)
if err != nil {
return nil, fmt.Errorf("error resolving RPC addr %+q: %v", a.serverRPCAddr, err)
}
a.serverRPCAddr = net.JoinHostPort(addr.IP.String(), strconv.Itoa(addr.Port))
// Resolve the Server's Serf Address
if a.config.AdvertiseAddrs.Serf != "" {
a.serverSerfAddr = a.config.AdvertiseAddrs.Serf
} else if a.config.Addresses.Serf != "" {
a.serverSerfAddr = net.JoinHostPort(a.config.Addresses.Serf, strconv.Itoa(a.config.Ports.Serf))
} else if a.config.BindAddr != "" {
a.serverSerfAddr = net.JoinHostPort(a.config.BindAddr, strconv.Itoa(a.config.Ports.Serf))
} else {
a.serverSerfAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(a.config.Ports.Serf))
}
addr, err = net.ResolveTCPAddr("tcp", a.serverSerfAddr)
if err != nil {
return nil, fmt.Errorf("error resolving Serf addr %+q: %v", a.serverSerfAddr, err)
}
a.serverSerfAddr = net.JoinHostPort(addr.IP.String(), strconv.Itoa(addr.Port))
if gcThreshold := a.config.Server.NodeGCThreshold; gcThreshold != "" {
dur, err := time.ParseDuration(gcThreshold)
if err != nil {
return nil, err
}
conf.NodeGCThreshold = dur
}
if heartbeatGrace := a.config.Server.HeartbeatGrace; heartbeatGrace != "" {
dur, err := time.ParseDuration(heartbeatGrace)
if err != nil {
return nil, err
}
conf.HeartbeatGrace = dur
}
if a.config.Consul.AutoAdvertise && a.config.Consul.ServerServiceName == "" {
return nil, fmt.Errorf("server_service_name must be set when auto_advertise is enabled")
}
conf.ConsulConfig = a.config.Consul
return conf, nil
}
// clientConfig is used to generate a new client configuration struct
// for initializing a Nomad client.
func (a *Agent) clientConfig() (*clientconfig.Config, error) {
// Setup the configuration
conf := a.config.ClientConfig
if conf == nil {
conf = clientconfig.DefaultConfig()
}
if a.server != nil {
conf.RPCHandler = a.server
}
conf.LogOutput = a.logOutput
conf.DevMode = a.config.DevMode
if a.config.Region != "" {
conf.Region = a.config.Region
}
if a.config.DataDir != "" {
conf.StateDir = filepath.Join(a.config.DataDir, "client")
conf.AllocDir = filepath.Join(a.config.DataDir, "alloc")
}
if a.config.Client.StateDir != "" {
conf.StateDir = a.config.Client.StateDir
}
if a.config.Client.AllocDir != "" {
conf.AllocDir = a.config.Client.AllocDir
}
conf.Servers = a.config.Client.Servers
if a.config.Client.NetworkInterface != "" {
conf.NetworkInterface = a.config.Client.NetworkInterface
}
conf.ChrootEnv = a.config.Client.ChrootEnv
conf.Options = a.config.Client.Options
// Log deprecation messages about Consul-related configuration in client
// options
var invalidConsulKeys []string
for key := range conf.Options {
if strings.HasPrefix(key, "consul") {
invalidConsulKeys = append(invalidConsulKeys, fmt.Sprintf("options.%s", key))
}
}
if len(invalidConsulKeys) > 0 {
a.logger.Printf("[WARN] agent: Invalid keys: %v", strings.Join(invalidConsulKeys, ","))
a.logger.Printf(`Nomad client ignores consul related configuration in client options.
Please refer to the guide https://www.nomadproject.io/docs/agent/config.html#consul_options
to configure Nomad to work with Consul.`)
}
if a.config.Client.NetworkSpeed != 0 {
conf.NetworkSpeed = a.config.Client.NetworkSpeed
}
if a.config.Client.MaxKillTimeout != "" {
dur, err := time.ParseDuration(a.config.Client.MaxKillTimeout)
if err != nil {
return nil, fmt.Errorf("Error parsing retry interval: %s", err)
}
conf.MaxKillTimeout = dur
}
conf.ClientMaxPort = uint(a.config.Client.ClientMaxPort)
conf.ClientMinPort = uint(a.config.Client.ClientMinPort)
// Setup the node
conf.Node = new(structs.Node)
conf.Node.Datacenter = a.config.Datacenter
conf.Node.Name = a.config.NodeName
conf.Node.Meta = a.config.Client.Meta
conf.Node.NodeClass = a.config.Client.NodeClass
// Resolve the Client's HTTP address
if a.config.AdvertiseAddrs.HTTP != "" {
a.clientHTTPAddr = a.config.AdvertiseAddrs.HTTP
} else if a.config.Addresses.HTTP != "" {
a.clientHTTPAddr = net.JoinHostPort(a.config.Addresses.HTTP, strconv.Itoa(a.config.Ports.HTTP))
} else if a.config.BindAddr != "" {
a.clientHTTPAddr = net.JoinHostPort(a.config.BindAddr, strconv.Itoa(a.config.Ports.HTTP))
} else {
a.clientHTTPAddr = net.JoinHostPort("127.0.0.1", strconv.Itoa(a.config.Ports.HTTP))
}
addr, err := net.ResolveTCPAddr("tcp", a.clientHTTPAddr)
if err != nil {
return nil, fmt.Errorf("error resolving HTTP addr %+q: %v", a.clientHTTPAddr, err)
}
httpAddr := net.JoinHostPort(addr.IP.String(), strconv.Itoa(addr.Port))
conf.Node.HTTPAddr = httpAddr
a.clientHTTPAddr = httpAddr
// Reserve resources on the node.
r := conf.Node.Reserved
if r == nil {
r = new(structs.Resources)
conf.Node.Reserved = r
}
r.CPU = a.config.Client.Reserved.CPU
r.MemoryMB = a.config.Client.Reserved.MemoryMB
r.DiskMB = a.config.Client.Reserved.DiskMB
r.IOPS = a.config.Client.Reserved.IOPS
conf.GloballyReservedPorts = a.config.Client.Reserved.ParsedReservedPorts
conf.Version = fmt.Sprintf("%s%s", a.config.Version, a.config.VersionPrerelease)
conf.Revision = a.config.Revision
if a.config.Consul.AutoAdvertise && a.config.Consul.ClientServiceName == "" {
return nil, fmt.Errorf("client_service_name must be set when auto_advertise is enabled")
}
conf.ConsulConfig = a.config.Consul
conf.StatsCollectionInterval = a.config.Telemetry.collectionInterval
conf.PublishNodeMetrics = a.config.Telemetry.PublishNodeMetrics
conf.PublishAllocationMetrics = a.config.Telemetry.PublishAllocationMetrics
return conf, nil
}
// setupServer is used to setup the server if enabled
func (a *Agent) setupServer() error {
if !a.config.Server.Enabled {
return nil
}
// Setup the configuration
conf, err := a.serverConfig()
if err != nil {
return fmt.Errorf("server config setup failed: %s", err)
}
// Create the server
server, err := nomad.NewServer(conf, a.consulSyncer, a.logger)
if err != nil {
return fmt.Errorf("server setup failed: %v", err)
}
a.server = server
// Create the Nomad Server services for Consul
if a.config.Consul.AutoAdvertise {
httpServ := &structs.Service{
Name: a.config.Consul.ServerServiceName,
PortLabel: a.serverHTTPAddr,
Tags: []string{consul.ServiceTagHTTP},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Server HTTP Check",
Type: "http",
Path: "/v1/status/peers",
Protocol: "http", // TODO TLS
Interval: serverHttpCheckInterval,
Timeout: serverHttpCheckTimeout,
},
},
}
rpcServ := &structs.Service{
Name: a.config.Consul.ServerServiceName,
PortLabel: a.serverRPCAddr,
Tags: []string{consul.ServiceTagRPC},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Server RPC Check",
Type: "tcp",
Interval: serverRpcCheckInterval,
Timeout: serverRpcCheckTimeout,
},
},
}
serfServ := &structs.Service{
PortLabel: a.serverSerfAddr,
Name: a.config.Consul.ServerServiceName,
Tags: []string{consul.ServiceTagSerf},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Server Serf Check",
Type: "tcp",
Interval: serverSerfCheckInterval,
Timeout: serverSerfCheckTimeout,
},
},
}
a.consulSyncer.SetServices(consul.ServerDomain, map[consul.ServiceKey]*structs.Service{
consul.GenerateServiceKey(httpServ): httpServ,
consul.GenerateServiceKey(rpcServ): rpcServ,
consul.GenerateServiceKey(serfServ): serfServ,
})
}
return nil
}
// setupClient is used to setup the client if enabled
func (a *Agent) setupClient() error {
if !a.config.Client.Enabled {
return nil
}
// Setup the configuration
conf, err := a.clientConfig()
if err != nil {
return fmt.Errorf("client setup failed: %v", err)
}
// Reserve some ports for the plugins if we are on Windows
if runtime.GOOS == "windows" {
if err := a.reservePortsForClient(conf); err != nil {
return err
}
}
// Create the client
client, err := client.NewClient(conf, a.consulSyncer, a.logger)
if err != nil {
return fmt.Errorf("client setup failed: %v", err)
}
a.client = client
// Create the Nomad Client services for Consul
if a.config.Consul.AutoAdvertise {
httpServ := &structs.Service{
Name: a.config.Consul.ClientServiceName,
PortLabel: a.clientHTTPAddr,
Tags: []string{consul.ServiceTagHTTP},
Checks: []*structs.ServiceCheck{
&structs.ServiceCheck{
Name: "Nomad Client HTTP Check",
Type: "http",
Path: "/v1/agent/servers",
Protocol: "http", // TODO TLS
Interval: clientHttpCheckInterval,
Timeout: clientHttpCheckTimeout,
},
},
}
a.consulSyncer.SetServices(consul.ClientDomain, map[consul.ServiceKey]*structs.Service{
consul.GenerateServiceKey(httpServ): httpServ,
})
}
return nil
}
// reservePortsForClient reserves a range of ports for the client to use when
// it creates various plugins for log collection, executors, drivers, etc
func (a *Agent) reservePortsForClient(conf *clientconfig.Config) error {
// finding the device name for loopback
deviceName, addr, mask, err := a.findLoopbackDevice()
if err != nil {
return fmt.Errorf("error finding the device name for loopback: %v", err)
}
// seeing if the user has already reserved some resources on this device
var nr *structs.NetworkResource
if conf.Node.Reserved == nil {
conf.Node.Reserved = &structs.Resources{}
}
for _, n := range conf.Node.Reserved.Networks {
if n.Device == deviceName {
nr = n
}
}
// If the user hasn't reserved resources on this device yet, create the entry
if nr == nil {
nr = &structs.NetworkResource{
Device: deviceName,
IP: addr,
CIDR: mask,
ReservedPorts: make([]structs.Port, 0),
}
}
// appending the port ranges we want to use for the client to the list of
// reserved ports for this device
for i := conf.ClientMinPort; i <= conf.ClientMaxPort; i++ {
nr.ReservedPorts = append(nr.ReservedPorts, structs.Port{Label: fmt.Sprintf("plugin-%d", i), Value: int(i)})
}
conf.Node.Reserved.Networks = append(conf.Node.Reserved.Networks, nr)
return nil
}
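// Illustrative sketch, not part of the original file: with hypothetical values
// ClientMinPort=14000 and ClientMaxPort=14002, the loop above appends three
// reserved ports for the loopback device, one label per plugin port:
//
//	nr.ReservedPorts = []structs.Port{
//		{Label: "plugin-14000", Value: 14000},
//		{Label: "plugin-14001", Value: 14001},
//		{Label: "plugin-14002", Value: 14002},
//	}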
// findLoopbackDevice iterates through all the interfaces on a machine and
// returns the name, IPv4 address, and CIDR mask of the loopback device
func (a *Agent) findLoopbackDevice() (string, string, string, error) {
var ifcs []net.Interface
var err error
ifcs, err = net.Interfaces()
if err != nil {
return "", "", "", err
}
for _, ifc := range ifcs {
addrs, err := ifc.Addrs()
if err != nil {
return "", "", "", err
}
for _, addr := range addrs {
var ip net.IP
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
if ip.IsLoopback() {
if ip.To4() == nil {
continue
}
return ifc.Name, ip.String(), addr.String(), nil
}
}
}
return "", "", "", fmt.Errorf("no loopback devices with IPV4 addr found")
}
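// Illustrative usage sketch with assumed values, not part of the original
// file: on a typical Linux host the call above would yield the loopback
// interface name, its IPv4 address, and the CIDR form of that address, e.g.
//
//	name, ip, cidr, err := a.findLoopbackDevice()
//	// name == "lo", ip == "127.0.0.1", cidr == "127.0.0.1/8"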
// Leave is used to gracefully exit. Clients will inform servers
// of their departure so that allocations can be rescheduled.
func (a *Agent) Leave() error {
if a.client != nil {
if err := a.client.Leave(); err != nil {
a.logger.Printf("[ERR] agent: client leave failed: %v", err)
}
}
if a.server != nil {
if err := a.server.Leave(); err != nil {
a.logger.Printf("[ERR] agent: server leave failed: %v", err)
}
}
return nil
}
// Shutdown is used to terminate the agent.
func (a *Agent) Shutdown() error {
a.shutdownLock.Lock()
defer a.shutdownLock.Unlock()
if a.shutdown {
return nil
}
a.logger.Println("[INFO] agent: requesting shutdown")
if a.client != nil {
if err := a.client.Shutdown(); err != nil {
a.logger.Printf("[ERR] agent: client shutdown failed: %v", err)
}
}
if a.server != nil {
if err := a.server.Shutdown(); err != nil {
a.logger.Printf("[ERR] agent: server shutdown failed: %v", err)
}
}
if err := a.consulSyncer.Shutdown(); err != nil {
a.logger.Printf("[ERR] agent: shutting down consul service failed: %v", err)
}
a.logger.Println("[INFO] agent: shutdown complete")
a.shutdown = true
close(a.shutdownCh)
return nil
}
// RPC is used to make an RPC call to the Nomad servers
func (a *Agent) RPC(method string, args interface{}, reply interface{}) error {
if a.server != nil {
return a.server.RPC(method, args, reply)
}
return a.client.RPC(method, args, reply)
}
// Client returns the configured client or nil
func (a *Agent) Client() *client.Client {
return a.client
}
// Server returns the configured server or nil
func (a *Agent) Server() *nomad.Server {
return a.server
}
// Stats is used to return statistics for debugging and insight
// for various sub-systems
func (a *Agent) Stats() map[string]map[string]string {
stats := make(map[string]map[string]string)
if a.server != nil {
subStat := a.server.Stats()
for k, v := range subStat {
stats[k] = v
}
}
if a.client != nil {
subStat := a.client.Stats()
for k, v := range subStat {
stats[k] = v
}
}
return stats
}
// setupConsulSyncer creates the Consul tasks used by this Nomad Agent
// (either Client or Server mode).
func (a *Agent) setupConsulSyncer() error {
var err error
a.consulSyncer, err = consul.NewSyncer(a.config.Consul, a.shutdownCh, a.logger)
if err != nil {
return err
}
a.consulSyncer.SetAddrFinder(func(portLabel string) (string, int) {
host, port, err := net.SplitHostPort(portLabel)
if err != nil {
p, err := strconv.Atoi(port)
if err != nil {
return "", 0
}
return "", p
}
// If the addr for the service is ":port", then we fall back
// to Nomad's default address resolution protocol.
//
// TODO(sean@): This should poll Consul to figure out what
// its advertise address is and use that in order to handle
// the case where there is something funky like NAT on this
// host. For now we just use the BindAddr if set, otherwise
// we fall back to a loopback addr.
if host == "" {
if a.config.BindAddr != "" {
host = a.config.BindAddr
} else {
host = "127.0.0.1"
}
}
p, err := strconv.Atoi(port)
if err != nil {
return host, 0
}
return host, p
})
return nil
}
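// exampleAddrFinder is an illustrative sketch, not part of the original file,
// showing how the AddrFinder registered above resolves a "host:port" port
// label into an (address, port) pair. bindAddr stands in for a.config.BindAddr
// and all sample values are assumptions.
func exampleAddrFinder(portLabel, bindAddr string) (string, int) {
// A label that is not in "host:port" form yields no usable address.
host, port, err := net.SplitHostPort(portLabel)
if err != nil {
return "", 0
}
// ":4646"-style labels fall back to the agent's bind address, else loopback.
if host == "" {
if bindAddr != "" {
host = bindAddr
} else {
host = "127.0.0.1"
}
}
p, err := strconv.Atoi(port)
if err != nil {
return host, 0
}
// e.g. exampleAddrFinder(":4646", "10.0.0.1") == ("10.0.0.1", 4646)
return host, p
}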


@ -1,178 +0,0 @@
package agent
import (
"net"
"net/http"
"github.com/hashicorp/serf/serf"
)
type Member struct {
Name string
Addr net.IP
Port uint16
Tags map[string]string
Status string
ProtocolMin uint8
ProtocolMax uint8
ProtocolCur uint8
DelegateMin uint8
DelegateMax uint8
DelegateCur uint8
}
func nomadMember(m serf.Member) Member {
return Member{
Name: m.Name,
Addr: m.Addr,
Port: m.Port,
Tags: m.Tags,
Status: m.Status.String(),
ProtocolMin: m.ProtocolMin,
ProtocolMax: m.ProtocolMax,
ProtocolCur: m.ProtocolCur,
DelegateMin: m.DelegateMin,
DelegateMax: m.DelegateMax,
DelegateCur: m.DelegateCur,
}
}
func (s *HTTPServer) AgentSelfRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
// Get the member as a server
var member serf.Member
srv := s.agent.Server()
if srv != nil {
member = srv.LocalMember()
}
self := agentSelf{
Config: s.agent.config,
Member: nomadMember(member),
Stats: s.agent.Stats(),
}
return self, nil
}
func (s *HTTPServer) AgentJoinRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
srv := s.agent.Server()
if srv == nil {
return nil, CodedError(501, ErrInvalidMethod)
}
// Get the join addresses
query := req.URL.Query()
addrs := query["address"]
if len(addrs) == 0 {
return nil, CodedError(400, "missing address to join")
}
// Attempt the join
num, err := srv.Join(addrs)
var errStr string
if err != nil {
errStr = err.Error()
}
return joinResult{num, errStr}, nil
}
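// Illustrative usage, not part of the original file: joining an agent to an
// existing server over the HTTP API backed by the handler above. The address,
// the /v1/agent/join route, and the serf port 4648 are assumptions used only
// for the example.
//
//	PUT /v1/agent/join?address=10.0.0.2:4648
//	// => {"num_joined": 1, "error": ""}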
func (s *HTTPServer) AgentMembersRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
srv := s.agent.Server()
if srv == nil {
return nil, CodedError(501, ErrInvalidMethod)
}
serfMembers := srv.Members()
members := make([]Member, len(serfMembers))
for i, mem := range serfMembers {
members[i] = nomadMember(mem)
}
return members, nil
}
func (s *HTTPServer) AgentForceLeaveRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
srv := s.agent.Server()
if srv == nil {
return nil, CodedError(501, ErrInvalidMethod)
}
// Get the node to eject
node := req.URL.Query().Get("node")
if node == "" {
return nil, CodedError(400, "missing node to force leave")
}
// Attempt remove
err := srv.RemoveFailedNode(node)
return nil, err
}
// AgentServersRequest is used to query the list of servers used by the Nomad
// Client for RPCs. This endpoint can also be used to update the list of
// servers for a given agent.
func (s *HTTPServer) AgentServersRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
switch req.Method {
case "PUT", "POST":
return s.updateServers(resp, req)
case "GET":
return s.listServers(resp, req)
default:
return nil, CodedError(405, ErrInvalidMethod)
}
}
func (s *HTTPServer) listServers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
client := s.agent.Client()
if client == nil {
return nil, CodedError(501, ErrInvalidMethod)
}
peers := s.agent.client.RPCProxy().ServerRPCAddrs()
return peers, nil
}
func (s *HTTPServer) updateServers(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
client := s.agent.Client()
if client == nil {
return nil, CodedError(501, ErrInvalidMethod)
}
// Get the servers from the request
servers := req.URL.Query()["address"]
if len(servers) == 0 {
return nil, CodedError(400, "missing server address")
}
// Set the servers list into the client
for _, server := range servers {
s.agent.logger.Printf("[TRACE] Adding server %s to the client's primary server list", server)
se := client.AddPrimaryServerToRPCProxy(server)
if se == nil {
s.agent.logger.Printf("[ERR] Attempt to add server %q to client failed", server)
}
}
return nil, nil
}
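// Illustrative usage, not part of the original file: querying and updating the
// client's server list through AgentServersRequest above. The addresses shown
// are placeholders.
//
//	GET /v1/agent/servers
//	// => ["10.0.0.1:4647", "10.0.0.2:4647"]
//
//	PUT /v1/agent/servers?address=10.0.0.3:4647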
type agentSelf struct {
Config *Config `json:"config"`
Member Member `json:"member,omitempty"`
Stats map[string]map[string]string `json:"stats"`
}
type joinResult struct {
NumJoined int `json:"num_joined"`
Error string `json:"error"`
}


@ -1,85 +0,0 @@
package agent
import (
"net/http"
"strings"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
allocNotFoundErr = "allocation not found"
)
func (s *HTTPServer) AllocsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.AllocListRequest{}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.AllocListResponse
if err := s.agent.RPC("Alloc.List", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Allocations == nil {
out.Allocations = make([]*structs.AllocListStub, 0)
}
return out.Allocations, nil
}
func (s *HTTPServer) AllocSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
allocID := strings.TrimPrefix(req.URL.Path, "/v1/allocation/")
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.AllocSpecificRequest{
AllocID: allocID,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SingleAllocResponse
if err := s.agent.RPC("Alloc.GetAlloc", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Alloc == nil {
return nil, CodedError(404, "alloc not found")
}
return out.Alloc, nil
}
func (s *HTTPServer) ClientAllocRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.agent.client == nil {
return nil, clientNotRunning
}
reqSuffix := strings.TrimPrefix(req.URL.Path, "/v1/client/allocation/")
// tokenize the suffix of the path to get the alloc id and find the action
// invoked on the alloc id
tokens := strings.Split(reqSuffix, "/")
if len(tokens) == 1 || tokens[1] != "stats" {
return nil, CodedError(404, allocNotFoundErr)
}
allocID := tokens[0]
// Get the stats reporter
clientStats := s.agent.client.StatsReporter()
aStats, err := clientStats.GetAllocStats(allocID)
if err != nil {
return nil, err
}
task := req.URL.Query().Get("task")
return aStats.LatestAllocStats(task)
}
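// Illustrative usage, not part of the original file: fetching resource usage
// stats for a single allocation via the handler above. The allocation ID and
// task name are placeholders.
//
//	GET /v1/client/allocation/203266e5-e0d6-9486-5e05-397ed2b184af/stats?task=web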


@ -1,873 +0,0 @@
package agent
import (
"flag"
"fmt"
"io"
"log"
"os"
"os/signal"
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"syscall"
"time"
"github.com/armon/go-metrics"
"github.com/armon/go-metrics/circonus"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-checkpoint"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
"github.com/hashicorp/nomad/helper/flag-slice"
"github.com/hashicorp/nomad/helper/gated-writer"
"github.com/hashicorp/scada-client/scada"
"github.com/mitchellh/cli"
)
// gracefulTimeout controls how long we wait before forcefully terminating
const gracefulTimeout = 5 * time.Second
// Command is a Command implementation that runs a Nomad agent.
// The command will not end unless a shutdown message is sent on the
// ShutdownCh. If two messages are sent on the ShutdownCh it will forcibly
// exit.
type Command struct {
Revision string
Version string
VersionPrerelease string
Ui cli.Ui
ShutdownCh <-chan struct{}
args []string
agent *Agent
httpServer *HTTPServer
logFilter *logutils.LevelFilter
logOutput io.Writer
retryJoinErrCh chan struct{}
scadaProvider *scada.Provider
scadaHttp *HTTPServer
}
func (c *Command) readConfig() *Config {
var dev bool
var configPath []string
var servers string
var meta []string
// Make a new, empty config.
cmdConfig := &Config{
Atlas: &AtlasConfig{},
Client: &ClientConfig{},
Ports: &Ports{},
Server: &ServerConfig{},
}
flags := flag.NewFlagSet("agent", flag.ContinueOnError)
flags.Usage = func() { c.Ui.Error(c.Help()) }
// Role options
flags.BoolVar(&dev, "dev", false, "")
flags.BoolVar(&cmdConfig.Server.Enabled, "server", false, "")
flags.BoolVar(&cmdConfig.Client.Enabled, "client", false, "")
// Server-only options
flags.IntVar(&cmdConfig.Server.BootstrapExpect, "bootstrap-expect", 0, "")
flags.BoolVar(&cmdConfig.Server.RejoinAfterLeave, "rejoin", false, "")
flags.Var((*sliceflag.StringFlag)(&cmdConfig.Server.StartJoin), "join", "")
flags.Var((*sliceflag.StringFlag)(&cmdConfig.Server.RetryJoin), "retry-join", "")
flags.IntVar(&cmdConfig.Server.RetryMaxAttempts, "retry-max", 0, "")
flags.StringVar(&cmdConfig.Server.RetryInterval, "retry-interval", "", "")
// Client-only options
flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "")
flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "")
flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "")
flags.StringVar(&servers, "servers", "", "")
flags.Var((*sliceflag.StringFlag)(&meta), "meta", "")
flags.StringVar(&cmdConfig.Client.NetworkInterface, "network-interface", "", "")
flags.IntVar(&cmdConfig.Client.NetworkSpeed, "network-speed", 0, "")
// General options
flags.Var((*sliceflag.StringFlag)(&configPath), "config", "config")
flags.StringVar(&cmdConfig.BindAddr, "bind", "", "")
flags.StringVar(&cmdConfig.Region, "region", "", "")
flags.StringVar(&cmdConfig.DataDir, "data-dir", "", "")
flags.StringVar(&cmdConfig.Datacenter, "dc", "", "")
flags.StringVar(&cmdConfig.LogLevel, "log-level", "", "")
flags.StringVar(&cmdConfig.NodeName, "node", "", "")
// Atlas options
flags.StringVar(&cmdConfig.Atlas.Infrastructure, "atlas", "", "")
flags.BoolVar(&cmdConfig.Atlas.Join, "atlas-join", false, "")
flags.StringVar(&cmdConfig.Atlas.Token, "atlas-token", "", "")
if err := flags.Parse(c.args); err != nil {
return nil
}
// Split the servers.
if servers != "" {
cmdConfig.Client.Servers = strings.Split(servers, ",")
}
// Parse the meta flags.
metaLength := len(meta)
if metaLength != 0 {
cmdConfig.Client.Meta = make(map[string]string, metaLength)
for _, kv := range meta {
parts := strings.SplitN(kv, "=", 2)
if len(parts) != 2 {
c.Ui.Error(fmt.Sprintf("Error parsing Client.Meta value: %v", kv))
return nil
}
cmdConfig.Client.Meta[parts[0]] = parts[1]
}
}
// Load the configuration
var config *Config
if dev {
config = DevConfig()
} else {
config = DefaultConfig()
}
for _, path := range configPath {
current, err := LoadConfig(path)
if err != nil {
c.Ui.Error(fmt.Sprintf(
"Error loading configuration from %s: %s", path, err))
return nil
}
// The user asked us to load some config here but we didn't find any,
// so we'll complain but continue.
if current == nil || reflect.DeepEqual(current, &Config{}) {
c.Ui.Warn(fmt.Sprintf("No configuration loaded from %s", path))
}
if config == nil {
config = current
} else {
config = config.Merge(current)
}
}
// Ensure the sub-structs at least exist
if config.Atlas == nil {
config.Atlas = &AtlasConfig{}
}
if config.Client == nil {
config.Client = &ClientConfig{}
}
if config.Server == nil {
config.Server = &ServerConfig{}
}
// Merge any CLI options over config file options
config = config.Merge(cmdConfig)
// Set the version info
config.Revision = c.Revision
config.Version = c.Version
config.VersionPrerelease = c.VersionPrerelease
if dev {
// Skip validation for dev mode
return config
}
// Parse the RetryInterval.
dur, err := time.ParseDuration(config.Server.RetryInterval)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing retry interval: %s", err))
return nil
}
config.Server.retryInterval = dur
// Check that the server is running in at least one mode.
if !(config.Server.Enabled || config.Client.Enabled) {
c.Ui.Error("Must specify either server, client or dev mode for the agent.")
return nil
}
// Verify the paths are absolute.
dirs := map[string]string{
"data-dir": config.DataDir,
"alloc-dir": config.Client.AllocDir,
"state-dir": config.Client.StateDir,
}
for k, dir := range dirs {
if dir == "" {
continue
}
if !filepath.IsAbs(dir) {
c.Ui.Error(fmt.Sprintf("%s must be given as an absolute path: got %v", k, dir))
return nil
}
}
// Ensure that we have the directories we need to run.
if config.Server.Enabled && config.DataDir == "" {
c.Ui.Error("Must specify data directory")
return nil
}
// The config is valid if the top-level data-dir is set or if both
// alloc-dir and state-dir are set.
if config.Client.Enabled && config.DataDir == "" {
if config.Client.AllocDir == "" || config.Client.StateDir == "" {
c.Ui.Error("Must specify both the state and alloc dir if data-dir is omitted.")
return nil
}
}
// Check the bootstrap flags
if config.Server.BootstrapExpect > 0 && !config.Server.Enabled {
c.Ui.Error("Bootstrap requires server mode to be enabled")
return nil
}
if config.Server.BootstrapExpect == 1 {
c.Ui.Error("WARNING: Bootstrap mode enabled! Potentially unsafe operation.")
}
return config
}
// setupLoggers is used to setup the logGate, logWriter, and our logOutput
func (c *Command) setupLoggers(config *Config) (*gatedwriter.Writer, *logWriter, io.Writer) {
// Setup logging. First create the gated log writer, which will
// store logs until we're ready to show them. Then create the level
// filter, filtering logs of the specified level.
logGate := &gatedwriter.Writer{
Writer: &cli.UiWriter{Ui: c.Ui},
}
c.logFilter = LevelFilter()
c.logFilter.MinLevel = logutils.LogLevel(strings.ToUpper(config.LogLevel))
c.logFilter.Writer = logGate
if !ValidateLevelFilter(c.logFilter.MinLevel, c.logFilter) {
c.Ui.Error(fmt.Sprintf(
"Invalid log level: %s. Valid log levels are: %v",
c.logFilter.MinLevel, c.logFilter.Levels))
return nil, nil, nil
}
// Check if syslog is enabled
var syslog io.Writer
if config.EnableSyslog {
l, err := gsyslog.NewLogger(gsyslog.LOG_NOTICE, config.SyslogFacility, "nomad")
if err != nil {
c.Ui.Error(fmt.Sprintf("Syslog setup failed: %v", err))
return nil, nil, nil
}
syslog = &SyslogWrapper{l, c.logFilter}
}
// Create a log writer, and wrap a logOutput around it
logWriter := NewLogWriter(512)
var logOutput io.Writer
if syslog != nil {
logOutput = io.MultiWriter(c.logFilter, logWriter, syslog)
} else {
logOutput = io.MultiWriter(c.logFilter, logWriter)
}
c.logOutput = logOutput
log.SetOutput(logOutput)
return logGate, logWriter, logOutput
}
// setupAgent is used to start the agent and various interfaces
func (c *Command) setupAgent(config *Config, logOutput io.Writer) error {
c.Ui.Output("Starting Nomad agent...")
agent, err := NewAgent(config, logOutput)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error starting agent: %s", err))
return err
}
c.agent = agent
// Enable the SCADA integration
if err := c.setupSCADA(config); err != nil {
agent.Shutdown()
c.Ui.Error(fmt.Sprintf("Error starting SCADA: %s", err))
return err
}
// Setup the HTTP server
http, err := NewHTTPServer(agent, config, logOutput)
if err != nil {
agent.Shutdown()
c.Ui.Error(fmt.Sprintf("Error starting http server: %s", err))
return err
}
c.httpServer = http
// Setup update checking
if !config.DisableUpdateCheck {
version := config.Version
if config.VersionPrerelease != "" {
version += fmt.Sprintf("-%s", config.VersionPrerelease)
}
updateParams := &checkpoint.CheckParams{
Product: "nomad",
Version: version,
}
if !config.DisableAnonymousSignature {
updateParams.SignatureFile = filepath.Join(config.DataDir, "checkpoint-signature")
}
// Schedule a periodic check with expected interval of 24 hours
checkpoint.CheckInterval(updateParams, 24*time.Hour, c.checkpointResults)
// Do an immediate check within the next 30 seconds
go func() {
time.Sleep(lib.RandomStagger(30 * time.Second))
c.checkpointResults(checkpoint.Check(updateParams))
}()
}
return nil
}
// checkpointResults is used to handle periodic results from our update checker
func (c *Command) checkpointResults(results *checkpoint.CheckResponse, err error) {
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to check for updates: %v", err))
return
}
if results.Outdated {
versionStr := c.Version
if c.VersionPrerelease != "" {
versionStr += fmt.Sprintf("-%s", c.VersionPrerelease)
}
c.Ui.Error(fmt.Sprintf("Newer Nomad version available: %s (currently running: %s)", results.CurrentVersion, versionStr))
}
for _, alert := range results.Alerts {
switch alert.Level {
case "info":
c.Ui.Info(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
default:
c.Ui.Error(fmt.Sprintf("Bulletin [%s]: %s (%s)", alert.Level, alert.Message, alert.URL))
}
}
}
func (c *Command) Run(args []string) int {
c.Ui = &cli.PrefixedUi{
OutputPrefix: "==> ",
InfoPrefix: " ",
ErrorPrefix: "==> ",
Ui: c.Ui,
}
// Parse our configs
c.args = args
config := c.readConfig()
if config == nil {
return 1
}
// Setup the log outputs
logGate, _, logOutput := c.setupLoggers(config)
if logGate == nil {
return 1
}
// Log config files
if len(config.Files) > 0 {
c.Ui.Info(fmt.Sprintf("Loaded configuration from %s", strings.Join(config.Files, ", ")))
} else {
c.Ui.Info("No configuration files loaded")
}
// Initialize the telemetry
if err := c.setupTelementry(config); err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing telemetry: %s", err))
return 1
}
// Create the agent
if err := c.setupAgent(config, logOutput); err != nil {
return 1
}
defer c.agent.Shutdown()
// Check and shut down the SCADA listeners at the end
defer func() {
if c.httpServer != nil {
c.httpServer.Shutdown()
}
if c.scadaHttp != nil {
c.scadaHttp.Shutdown()
}
if c.scadaProvider != nil {
c.scadaProvider.Shutdown()
}
}()
// Join startup nodes if specified
if err := c.startupJoin(config); err != nil {
c.Ui.Error(err.Error())
return 1
}
// Compile agent information for output later
info := make(map[string]string)
info["client"] = strconv.FormatBool(config.Client.Enabled)
info["log level"] = config.LogLevel
info["server"] = strconv.FormatBool(config.Server.Enabled)
info["region"] = fmt.Sprintf("%s (DC: %s)", config.Region, config.Datacenter)
if config.Atlas != nil && config.Atlas.Infrastructure != "" {
info["atlas"] = fmt.Sprintf("(Infrastructure: '%s' Join: %v)",
config.Atlas.Infrastructure, config.Atlas.Join)
} else {
info["atlas"] = "<disabled>"
}
// Sort the keys for output
infoKeys := make([]string, 0, len(info))
for key := range info {
infoKeys = append(infoKeys, key)
}
sort.Strings(infoKeys)
// Agent configuration output
padding := 18
c.Ui.Output("Nomad agent configuration:\n")
for _, k := range infoKeys {
c.Ui.Info(fmt.Sprintf(
"%s%s: %s",
strings.Repeat(" ", padding-len(k)),
strings.Title(k),
info[k]))
}
c.Ui.Output("")
// Output the header that the server has started
c.Ui.Output("Nomad agent started! Log data will stream in below:\n")
// Enable log streaming
logGate.Flush()
// Start retry join process
c.retryJoinErrCh = make(chan struct{})
go c.retryJoin(config)
// Wait for exit
return c.handleSignals(config)
}
// handleSignals blocks until we get an exit-causing signal
func (c *Command) handleSignals(config *Config) int {
signalCh := make(chan os.Signal, 4)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM, syscall.SIGHUP)
// Wait for a signal
WAIT:
var sig os.Signal
select {
case s := <-signalCh:
sig = s
case <-c.ShutdownCh:
sig = os.Interrupt
case <-c.retryJoinErrCh:
return 1
}
c.Ui.Output(fmt.Sprintf("Caught signal: %v", sig))
// Check if this is a SIGHUP
if sig == syscall.SIGHUP {
if conf := c.handleReload(config); conf != nil {
*config = *conf
}
goto WAIT
}
// Check if we should do a graceful leave
graceful := false
if sig == os.Interrupt && config.LeaveOnInt {
graceful = true
} else if sig == syscall.SIGTERM && config.LeaveOnTerm {
graceful = true
}
// Bail fast if not doing a graceful leave
if !graceful {
return 1
}
// Attempt a graceful leave
gracefulCh := make(chan struct{})
c.Ui.Output("Gracefully shutting down agent...")
go func() {
if err := c.agent.Leave(); err != nil {
c.Ui.Error(fmt.Sprintf("Error: %s", err))
return
}
close(gracefulCh)
}()
// Wait for leave or another signal
select {
case <-signalCh:
return 1
case <-time.After(gracefulTimeout):
return 1
case <-gracefulCh:
return 0
}
}
// handleReload is invoked when we should reload our configs, e.g. SIGHUP
func (c *Command) handleReload(config *Config) *Config {
c.Ui.Output("Reloading configuration...")
newConf := c.readConfig()
if newConf == nil {
c.Ui.Error(fmt.Sprintf("Failed to reload configs"))
return config
}
// Change the log level
minLevel := logutils.LogLevel(strings.ToUpper(newConf.LogLevel))
if ValidateLevelFilter(minLevel, c.logFilter) {
c.logFilter.SetMinLevel(minLevel)
} else {
c.Ui.Error(fmt.Sprintf(
"Invalid log level: %s. Valid log levels are: %v",
minLevel, c.logFilter.Levels))
// Keep the current log level
newConf.LogLevel = config.LogLevel
}
return newConf
}
// setupTelementry is used to set up the telemetry sub-systems
func (c *Command) setupTelementry(config *Config) error {
/* Setup telemetry
Aggregate on 10 second intervals for 1 minute. Expose the
metrics over stderr when there is a SIGUSR1 received.
*/
inm := metrics.NewInmemSink(10*time.Second, time.Minute)
metrics.DefaultInmemSignal(inm)
var telConfig *Telemetry
if config.Telemetry == nil {
telConfig = &Telemetry{}
} else {
telConfig = config.Telemetry
}
metricsConf := metrics.DefaultConfig("nomad")
metricsConf.EnableHostname = !telConfig.DisableHostname
// Configure the statsite sink
var fanout metrics.FanoutSink
if telConfig.StatsiteAddr != "" {
sink, err := metrics.NewStatsiteSink(telConfig.StatsiteAddr)
if err != nil {
return err
}
fanout = append(fanout, sink)
}
// Configure the statsd sink
if telConfig.StatsdAddr != "" {
sink, err := metrics.NewStatsdSink(telConfig.StatsdAddr)
if err != nil {
return err
}
fanout = append(fanout, sink)
}
// Configure the Circonus sink
if telConfig.CirconusAPIToken != "" || telConfig.CirconusCheckSubmissionURL != "" {
cfg := &circonus.Config{}
cfg.Interval = telConfig.CirconusSubmissionInterval
cfg.CheckManager.API.TokenKey = telConfig.CirconusAPIToken
cfg.CheckManager.API.TokenApp = telConfig.CirconusAPIApp
cfg.CheckManager.API.URL = telConfig.CirconusAPIURL
cfg.CheckManager.Check.SubmissionURL = telConfig.CirconusCheckSubmissionURL
cfg.CheckManager.Check.ID = telConfig.CirconusCheckID
cfg.CheckManager.Check.ForceMetricActivation = telConfig.CirconusCheckForceMetricActivation
cfg.CheckManager.Check.InstanceID = telConfig.CirconusCheckInstanceID
cfg.CheckManager.Check.SearchTag = telConfig.CirconusCheckSearchTag
cfg.CheckManager.Broker.ID = telConfig.CirconusBrokerID
cfg.CheckManager.Broker.SelectTag = telConfig.CirconusBrokerSelectTag
if cfg.CheckManager.API.TokenApp == "" {
cfg.CheckManager.API.TokenApp = "nomad"
}
if cfg.CheckManager.Check.InstanceID == "" {
if config.NodeName != "" && config.Datacenter != "" {
cfg.CheckManager.Check.InstanceID = fmt.Sprintf("%s:%s", config.NodeName, config.Datacenter)
}
}
if cfg.CheckManager.Check.SearchTag == "" {
cfg.CheckManager.Check.SearchTag = "service:nomad"
}
sink, err := circonus.NewCirconusSink(cfg)
if err != nil {
return err
}
sink.Start()
fanout = append(fanout, sink)
}
// Initialize the global sink
if len(fanout) > 0 {
fanout = append(fanout, inm)
metrics.NewGlobal(metricsConf, fanout)
} else {
metricsConf.EnableHostname = false
metrics.NewGlobal(metricsConf, inm)
}
return nil
}
// setupSCADA is used to start a new SCADA provider and listener,
// replacing any existing listeners.
func (c *Command) setupSCADA(config *Config) error {
// Shut down existing SCADA listeners
if c.scadaProvider != nil {
c.scadaProvider.Shutdown()
}
if c.scadaHttp != nil {
c.scadaHttp.Shutdown()
}
// No-op if we don't have an infrastructure
if config.Atlas == nil || config.Atlas.Infrastructure == "" {
return nil
}
// Create the new provider and listener
c.Ui.Output("Connecting to Atlas: " + config.Atlas.Infrastructure)
scadaConfig := &scada.Config{
Service: "nomad",
Version: fmt.Sprintf("%s%s", config.Version, config.VersionPrerelease),
ResourceType: "nomad-cluster",
Meta: map[string]string{
"auto-join": strconv.FormatBool(config.Atlas.Join),
"region": config.Region,
"datacenter": config.Datacenter,
"client": strconv.FormatBool(config.Client != nil && config.Client.Enabled),
"server": strconv.FormatBool(config.Server != nil && config.Server.Enabled),
},
Atlas: scada.AtlasConfig{
Endpoint: config.Atlas.Endpoint,
Infrastructure: config.Atlas.Infrastructure,
Token: config.Atlas.Token,
},
}
provider, list, err := scada.NewHTTPProvider(scadaConfig, c.logOutput)
if err != nil {
return err
}
c.scadaProvider = provider
c.scadaHttp = newScadaHttp(c.agent, list)
return nil
}
func (c *Command) startupJoin(config *Config) error {
if len(config.Server.StartJoin) == 0 || !config.Server.Enabled {
return nil
}
c.Ui.Output("Joining cluster...")
n, err := c.agent.server.Join(config.Server.StartJoin)
if err != nil {
return err
}
c.Ui.Info(fmt.Sprintf("Join completed. Synced with %d initial agents", n))
return nil
}
// retryJoin is used to handle retrying a join until it succeeds or all retries
// are exhausted.
func (c *Command) retryJoin(config *Config) {
if len(config.Server.RetryJoin) == 0 || !config.Server.Enabled {
return
}
logger := c.agent.logger
logger.Printf("[INFO] agent: Joining cluster...")
attempt := 0
for {
n, err := c.agent.server.Join(config.Server.RetryJoin)
if err == nil {
logger.Printf("[INFO] agent: Join completed. Synced with %d initial agents", n)
return
}
attempt++
if config.Server.RetryMaxAttempts > 0 && attempt > config.Server.RetryMaxAttempts {
logger.Printf("[ERR] agent: max join retry exhausted, exiting")
close(c.retryJoinErrCh)
return
}
logger.Printf("[WARN] agent: Join failed: %v, retrying in %v", err,
config.Server.RetryInterval)
time.Sleep(config.Server.retryInterval)
}
}
func (c *Command) Synopsis() string {
return "Runs a Nomad agent"
}
func (c *Command) Help() string {
helpText := `
Usage: nomad agent [options]
Starts the Nomad agent and runs until an interrupt is received.
The agent may be a client and/or server.
The Nomad agent's configuration primarily comes from the config
files used, but a subset of the options may also be passed directly
as CLI arguments, listed below.
General Options (clients and servers):
-bind=<addr>
The address the agent will bind to for all of its various network
services. The individual services that run bind to individual
ports on this address. Defaults to the loopback 127.0.0.1.
-config=<path>
The path to either a single config file or a directory of config
files to use for configuring the Nomad agent. This option may be
specified multiple times. If multiple config files are used, the
values from each will be merged together. During merging, values
from files found later in the list are merged over values from
previously parsed files.
-data-dir=<path>
The data directory used to store state and other persistent data.
On client machines this is used to house allocation data such as
downloaded artifacts used by drivers. On server nodes, the data
dir is also used to store the replicated log.
-dc=<datacenter>
The name of the datacenter this Nomad agent is a member of. By
default this is set to "dc1".
-log-level=<level>
Specify the verbosity level of Nomad's logs. Valid values include
DEBUG, INFO, and WARN, in decreasing order of verbosity. The
default is INFO.
-node=<name>
The name of the local agent. This name is used to identify the node
in the cluster. The name must be unique per region. The default is
the current hostname of the machine.
-region=<region>
Name of the region the Nomad agent will be a member of. By default
this value is set to "global".
-dev
Start the agent in development mode. This enables a pre-configured
dual-role agent (client + server) which is useful for developing
or testing Nomad. No other configuration is required to start the
agent in this mode.
Server Options:
-server
Enable server mode for the agent. Agents in server mode are
clustered together and handle the additional responsibility of
leader election, data replication, and scheduling work onto
eligible client nodes.
-bootstrap-expect=<num>
Configures the expected number of server nodes to wait for before
bootstrapping the cluster. Once <num> servers have joined each other,
Nomad initiates the bootstrap process.
-join=<address>
Address of an agent to join at start time. Can be specified
multiple times.
-retry-join=<address>
Address of an agent to join at start time with retries enabled.
Can be specified multiple times.
-retry-max=<num>
Maximum number of join attempts. Defaults to 0, which will retry
indefinitely.
-retry-interval=<dur>
Time to wait between join attempts.
-rejoin
Ignores a previous leave and attempts to rejoin the cluster.
Client Options:
-client
Enable client mode for the agent. Client mode enables a given node to be
evaluated for allocations. If client mode is not enabled, no work will be
scheduled to the agent.
-state-dir
The directory used to store state and other persistent data. If not
specified, a subdirectory under the "-data-dir" will be used.
-alloc-dir
The directory used to store allocation data such as downloaded artifacts as
well as data produced by tasks. If not specified, a subdirectory under the
"-data-dir" will be used.
-servers
A list of known server addresses to connect to given as "host:port" and
delimited by commas.
-node-class
Mark this node as a member of a node-class. This can be used to label
similar node types.
-meta
User-specified metadata to associate with the node. Each instance of -meta
parses a single KEY=VALUE pair. Repeat the meta flag for each key/value pair
to be added.
-network-interface
Forces the network fingerprinter to use the specified network interface.
-network-speed
The default speed for network interfaces in MBits if the link speed cannot
be determined dynamically.
Atlas Options:
-atlas=<infrastructure>
The Atlas infrastructure name to configure. This enables the SCADA
client and attempts to connect Nomad to the HashiCorp Atlas service
using the provided infrastructure name and token.
-atlas-token=<token>
The Atlas token to use when connecting to the HashiCorp Atlas
service. This must be provided to successfully connect your Nomad
agent to Atlas.
-atlas-join
Enable the Atlas join feature. This mode allows agents to discover
each other automatically using the SCADA integration features.
`
return strings.TrimSpace(helpText)
}
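// Illustrative invocations, not part of the original file, combining the flags
// parsed in readConfig above; paths and addresses are placeholders:
//
//	nomad agent -dev
//	nomad agent -config=/etc/nomad.d -server -bootstrap-expect=3
//	nomad agent -config=/etc/nomad.d -client -servers=10.0.0.1:4647,10.0.0.2:4647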


@ -1,109 +0,0 @@
region = "foobar"
datacenter = "dc2"
name = "my-web"
data_dir = "/tmp/nomad"
log_level = "ERR"
bind_addr = "192.168.0.1"
enable_debug = true
ports {
http = 1234
rpc = 2345
serf = 3456
}
addresses {
http = "127.0.0.1"
rpc = "127.0.0.2"
serf = "127.0.0.3"
}
advertise {
rpc = "127.0.0.3"
serf = "127.0.0.4"
}
client {
enabled = true
state_dir = "/tmp/client-state"
alloc_dir = "/tmp/alloc"
servers = ["a.b.c:80", "127.0.0.1:1234"]
node_class = "linux-medium-64bit"
meta {
foo = "bar"
baz = "zip"
}
options {
foo = "bar"
baz = "zip"
}
chroot_env {
"/opt/myapp/etc" = "/etc"
"/opt/myapp/bin" = "/bin"
}
network_interface = "eth0"
network_speed = 100
reserved {
cpu = 10
memory = 10
disk = 10
iops = 10
reserved_ports = "1,100,10-12"
}
client_min_port = 1000
client_max_port = 2000
max_kill_timeout = "10s"
stats {
data_points = 35
collection_interval = "5s"
}
}
server {
enabled = true
bootstrap_expect = 5
data_dir = "/tmp/data"
protocol_version = 3
num_schedulers = 2
enabled_schedulers = ["test"]
node_gc_threshold = "12h"
heartbeat_grace = "30s"
retry_join = [ "1.1.1.1", "2.2.2.2" ]
start_join = [ "1.1.1.1", "2.2.2.2" ]
retry_max = 3
retry_interval = "15s"
rejoin_after_leave = true
}
telemetry {
statsite_address = "127.0.0.1:1234"
statsd_address = "127.0.0.1:2345"
disable_hostname = true
collection_interval = "3s"
publish_allocation_metrics = true
publish_node_metrics = true
}
leave_on_interrupt = true
leave_on_terminate = true
enable_syslog = true
syslog_facility = "LOCAL1"
disable_update_check = true
disable_anonymous_signature = true
atlas {
infrastructure = "armon/test"
token = "abcd"
join = true
endpoint = "127.0.0.1:1234"
}
http_api_response_headers {
Access-Control-Allow-Origin = "*"
}
consul {
server_service_name = "nomad"
client_service_name = "nomad-client"
address = "127.0.0.1:9500"
token = "token1"
auth = "username:pass"
ssl = true
verify_ssl = false
ca_file = "/path/to/ca/file"
cert_file = "/path/to/cert/file"
key_file = "/path/to/key/file"
server_auto_join = false
client_auto_join = false
auto_advertise = false
}


@ -1,987 +0,0 @@
package agent
import (
"fmt"
"io"
"net"
"os"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"time"
client "github.com/hashicorp/nomad/client/config"
"github.com/hashicorp/nomad/nomad"
"github.com/hashicorp/nomad/nomad/structs/config"
)
// Config is the configuration for the Nomad agent.
type Config struct {
// Region is the region this agent is in. Defaults to global.
Region string `mapstructure:"region"`
// Datacenter is the datacenter this agent is in. Defaults to dc1
Datacenter string `mapstructure:"datacenter"`
// NodeName is the name we register as. Defaults to hostname.
NodeName string `mapstructure:"name"`
// DataDir is the directory to store our state in
DataDir string `mapstructure:"data_dir"`
// LogLevel is the level of the logs to put out
LogLevel string `mapstructure:"log_level"`
// BindAddr is the address on which all of nomad's services will
// be bound. If not specified, this defaults to 127.0.0.1.
BindAddr string `mapstructure:"bind_addr"`
// EnableDebug is used to enable debugging HTTP endpoints
EnableDebug bool `mapstructure:"enable_debug"`
// Ports is used to control the network ports we bind to.
Ports *Ports `mapstructure:"ports"`
// Addresses is used to override the network addresses we bind to.
Addresses *Addresses `mapstructure:"addresses"`
// AdvertiseAddrs is used to control the addresses we advertise.
AdvertiseAddrs *AdvertiseAddrs `mapstructure:"advertise"`
// Client has our client related settings
Client *ClientConfig `mapstructure:"client"`
// Server has our server related settings
Server *ServerConfig `mapstructure:"server"`
// Telemetry is used to configure sending telemetry
Telemetry *Telemetry `mapstructure:"telemetry"`
// LeaveOnInt is used to gracefully leave on the interrupt signal
LeaveOnInt bool `mapstructure:"leave_on_interrupt"`
// LeaveOnTerm is used to gracefully leave on the terminate signal
LeaveOnTerm bool `mapstructure:"leave_on_terminate"`
// EnableSyslog is used to enable sending logs to syslog
EnableSyslog bool `mapstructure:"enable_syslog"`
// SyslogFacility is used to control the syslog facility used.
SyslogFacility string `mapstructure:"syslog_facility"`
// DisableUpdateCheck is used to disable the periodic update
// and security bulletin checking.
DisableUpdateCheck bool `mapstructure:"disable_update_check"`
// DisableAnonymousSignature is used to disable setting the
// anonymous signature when doing the update check and looking
// for security bulletins
DisableAnonymousSignature bool `mapstructure:"disable_anonymous_signature"`
// AtlasConfig is used to configure Atlas
Atlas *AtlasConfig `mapstructure:"atlas"`
// Consul contains the configuration for the Consul Agent and
// parameters necessary to register services, their checks, and
// discover the current Nomad servers.
Consul *config.ConsulConfig `mapstructure:"consul"`
// NomadConfig is used to override the default config.
// This is largely used for testing purposes.
NomadConfig *nomad.Config `mapstructure:"-" json:"-"`
// ClientConfig is used to override the default config.
// This is largely used for testing purposes.
ClientConfig *client.Config `mapstructure:"-" json:"-"`
// DevMode is set by the -dev CLI flag.
DevMode bool `mapstructure:"-"`
// Version information is set at compilation time
Revision string
Version string
VersionPrerelease string
// List of config files that have been loaded (in order)
Files []string `mapstructure:"-"`
// HTTPAPIResponseHeaders allows users to configure the Nomad http agent to
// set arbitrary headers on API responses
HTTPAPIResponseHeaders map[string]string `mapstructure:"http_api_response_headers"`
}
// AtlasConfig is used to enable and parameterize the Atlas integration
type AtlasConfig struct {
// Infrastructure is the name of the infrastructure
// we belong to. e.g. hashicorp/stage
Infrastructure string `mapstructure:"infrastructure"`
// Token is our authentication token from Atlas
Token string `mapstructure:"token" json:"-"`
// Join controls if Atlas will attempt to auto-join the node
// to its cluster. Requires Atlas integration.
Join bool `mapstructure:"join"`
// Endpoint is the SCADA endpoint used for Atlas integration. If
// empty, the defaults from the provider are used.
Endpoint string `mapstructure:"endpoint"`
}
// ClientConfig is configuration specific to the client mode
type ClientConfig struct {
// Enabled controls if we are a client
Enabled bool `mapstructure:"enabled"`
// StateDir is the state directory
StateDir string `mapstructure:"state_dir"`
// AllocDir is the directory for storing allocation data
AllocDir string `mapstructure:"alloc_dir"`
// Servers is a list of known server addresses. These are as "host:port"
Servers []string `mapstructure:"servers"`
// NodeClass is used to group the node by class
NodeClass string `mapstructure:"node_class"`
// Options is used for configuration of nomad internals,
// like fingerprinters and drivers. The format is:
//
// namespace.option = value
Options map[string]string `mapstructure:"options"`
// Metadata associated with the node
Meta map[string]string `mapstructure:"meta"`
// A mapping of directories on the host OS to attempt to embed inside each
// task's chroot.
ChrootEnv map[string]string `mapstructure:"chroot_env"`
// Interface to use for network fingerprinting
NetworkInterface string `mapstructure:"network_interface"`
// The network link speed to use if it can not be determined dynamically.
NetworkSpeed int `mapstructure:"network_speed"`
// MaxKillTimeout allows capping the user-specifiable KillTimeout.
MaxKillTimeout string `mapstructure:"max_kill_timeout"`
// ClientMaxPort is the upper range of the ports that the client uses for
// communicating with plugin subsystems
ClientMaxPort int `mapstructure:"client_max_port"`
// ClientMinPort is the lower range of the ports that the client uses for
// communicating with plugin subsystems
ClientMinPort int `mapstructure:"client_min_port"`
// Reserved is used to reserve resources from being used by Nomad. This can
// be used to target a certain utilization or to prevent Nomad from using a
// particular set of ports.
Reserved *Resources `mapstructure:"reserved"`
}
// ServerConfig is configuration specific to the server mode
type ServerConfig struct {
// Enabled controls if we are a server
Enabled bool `mapstructure:"enabled"`
// BootstrapExpect tries to automatically bootstrap the Nomad cluster,
// by withholding peers until enough servers join.
BootstrapExpect int `mapstructure:"bootstrap_expect"`
// DataDir is the directory to store our state in
DataDir string `mapstructure:"data_dir"`
// ProtocolVersion is the protocol version to speak. This must be between
// ProtocolVersionMin and ProtocolVersionMax.
ProtocolVersion int `mapstructure:"protocol_version"`
// NumSchedulers is the number of scheduler threads that are run.
// This can be as many as one per core, or zero to disable this server
// from doing any scheduling work.
NumSchedulers int `mapstructure:"num_schedulers"`
// EnabledSchedulers controls the set of sub-schedulers that are
// enabled for this server to handle. This will restrict the evaluations
// that the workers dequeue for processing.
EnabledSchedulers []string `mapstructure:"enabled_schedulers"`
// NodeGCThreshold controls how "old" a node must be to be collected by GC.
NodeGCThreshold string `mapstructure:"node_gc_threshold"`
// HeartbeatGrace is the grace period beyond the TTL to account for network,
// processing delays and clock skew before marking a node as "down".
HeartbeatGrace string `mapstructure:"heartbeat_grace"`
// StartJoin is a list of addresses to attempt to join when the
// agent starts. If Serf is unable to communicate with any of these
// addresses, then the agent will error and exit.
StartJoin []string `mapstructure:"start_join"`
// RetryJoin is a list of addresses to join with retry enabled.
RetryJoin []string `mapstructure:"retry_join"`
// RetryMaxAttempts specifies the maximum number of times to retry joining a
// host on startup. This is useful for cases where we know the node will be
// online eventually.
RetryMaxAttempts int `mapstructure:"retry_max"`
// RetryInterval specifies the amount of time to wait in between join
// attempts on agent start. The minimum allowed value is 1 second and
// the default is 30s.
RetryInterval string `mapstructure:"retry_interval"`
retryInterval time.Duration `mapstructure:"-"`
// RejoinAfterLeave controls our interaction with the cluster after leave.
// When set to false (default), a leave causes Nomad to not rejoin
// the cluster until an explicit join is received. If this is set to
// true, we ignore the leave, and rejoin the cluster on start.
RejoinAfterLeave bool `mapstructure:"rejoin_after_leave"`
}
// Telemetry is the telemetry configuration for the server
type Telemetry struct {
StatsiteAddr string `mapstructure:"statsite_address"`
StatsdAddr string `mapstructure:"statsd_address"`
DisableHostname bool `mapstructure:"disable_hostname"`
CollectionInterval string `mapstructure:"collection_interval"`
collectionInterval time.Duration `mapstructure:"-"`
PublishAllocationMetrics bool `mapstructure:"publish_allocation_metrics"`
PublishNodeMetrics bool `mapstructure:"publish_node_metrics"`
// Circonus: see https://github.com/circonus-labs/circonus-gometrics
// for more details on the various configuration options.
// Valid configuration combinations:
// - CirconusAPIToken
// metric management enabled (search for existing check or create a new one)
// - CirconusSubmissionUrl
// metric management disabled (use check with specified submission_url,
// broker must be using a public SSL certificate)
// - CirconusAPIToken + CirconusCheckSubmissionURL
// metric management enabled (use check with specified submission_url)
// - CirconusAPIToken + CirconusCheckID
// metric management enabled (use check with specified id)
// CirconusAPIToken is a valid API Token used to create/manage check. If provided,
// metric management is enabled.
// Default: none
CirconusAPIToken string `mapstructure:"circonus_api_token"`
// CirconusAPIApp is an app name associated with API token.
// Default: "consul"
CirconusAPIApp string `mapstructure:"circonus_api_app"`
// CirconusAPIURL is the base URL to use for contacting the Circonus API.
// Default: "https://api.circonus.com/v2"
CirconusAPIURL string `mapstructure:"circonus_api_url"`
// CirconusSubmissionInterval is the interval at which metrics are submitted to Circonus.
// Default: 10s
CirconusSubmissionInterval string `mapstructure:"circonus_submission_interval"`
// CirconusCheckSubmissionURL is the check.config.submission_url field from a
// previously created HTTPTRAP check.
// Default: none
CirconusCheckSubmissionURL string `mapstructure:"circonus_submission_url"`
// CirconusCheckID is the check id (not check bundle id) from a previously created
// HTTPTRAP check. The numeric portion of the check._cid field.
// Default: none
CirconusCheckID string `mapstructure:"circonus_check_id"`
// CirconusCheckForceMetricActivation will force enabling metrics, as they are encountered,
// if the metric already exists and is NOT active. If check management is enabled, the default
// behavior is to add new metrics as they are encountered. If the metric already exists in the
// check, it will *NOT* be activated. This setting overrides that behavior.
// Default: "false"
CirconusCheckForceMetricActivation string `mapstructure:"circonus_check_force_metric_activation"`
// CirconusCheckInstanceID serves to uniquely identify the metrics coming from this "instance".
// It can be used to maintain metric continuity with transient or ephemeral instances as
// they move around within an infrastructure.
// Default: hostname:app
CirconusCheckInstanceID string `mapstructure:"circonus_check_instance_id"`
// CirconusCheckSearchTag is a special tag which, when coupled with the instance id, helps to
// narrow down the search results when neither a Submission URL nor a Check ID is provided.
// Default: service:app (e.g. service:consul)
CirconusCheckSearchTag string `mapstructure:"circonus_check_search_tag"`
// CirconusBrokerID is an explicit broker to use when creating a new check. The numeric portion
// of broker._cid. If metric management is enabled and neither a Submission URL nor Check ID
// is provided, an attempt will be made to search for an existing check using Instance ID and
// Search Tag. If one is not found, a new HTTPTRAP check will be created.
// Default: use the Select Tag if provided; otherwise, a random Enterprise Broker
// associated with the specified API token, or the default Circonus Broker.
CirconusBrokerID string `mapstructure:"circonus_broker_id"`
// CirconusBrokerSelectTag is a special tag which will be used to select a broker when
// a Broker ID is not provided. The best use of this is as a hint for which broker
// should be used based on *where* this particular instance is running.
// (e.g. a specific geo location or datacenter, dc:sfo)
// Default: none
CirconusBrokerSelectTag string `mapstructure:"circonus_broker_select_tag"`
}
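// An illustrative sketch (hypothetical values, not part of the original
// source): the four valid Circonus combinations described above, expressed as
// Telemetry literals.
var exampleCirconusCombos = []Telemetry{
	{CirconusAPIToken: "token"},                                                          // search for or create a check
	{CirconusCheckSubmissionURL: "https://trap.example.com/module/httptrap"},             // use a fixed submission URL, no management
	{CirconusAPIToken: "token", CirconusCheckSubmissionURL: "https://trap.example.com/"}, // manage the check behind that URL
	{CirconusAPIToken: "token", CirconusCheckID: "12345"},                                // manage an existing check by id
}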
// Ports is used to encapsulate the various ports we bind to for network
// services. If any are not specified then the defaults are used instead.
type Ports struct {
HTTP int `mapstructure:"http"`
RPC int `mapstructure:"rpc"`
Serf int `mapstructure:"serf"`
}
// Addresses encapsulates all of the addresses we bind to for various
// network services. Everything is optional and defaults to BindAddr.
type Addresses struct {
HTTP string `mapstructure:"http"`
RPC string `mapstructure:"rpc"`
Serf string `mapstructure:"serf"`
}
// AdvertiseAddrs is used to control the addresses we advertise out for
// different network services. Not all network services support an
// advertise address. All are optional and default to BindAddr.
type AdvertiseAddrs struct {
HTTP string `mapstructure:"http"`
RPC string `mapstructure:"rpc"`
Serf string `mapstructure:"serf"`
}
type Resources struct {
CPU int `mapstructure:"cpu"`
MemoryMB int `mapstructure:"memory"`
DiskMB int `mapstructure:"disk"`
IOPS int `mapstructure:"iops"`
ReservedPorts string `mapstructure:"reserved_ports"`
ParsedReservedPorts []int `mapstructure:"-"`
}
// ParseReserved expands the ReservedPorts string into a slice of port numbers.
// The supported syntax is comma-separated integers or hyphen-separated ranges.
// For example, "80,120-150,160"
func (r *Resources) ParseReserved() error {
parts := strings.Split(r.ReservedPorts, ",")
// Hot path the empty case
if len(parts) == 1 && parts[0] == "" {
return nil
}
ports := make(map[int]struct{})
for _, part := range parts {
part = strings.TrimSpace(part)
rangeParts := strings.Split(part, "-")
l := len(rangeParts)
switch l {
case 1:
if val := rangeParts[0]; val == "" {
return fmt.Errorf("can't specify empty port")
} else {
port, err := strconv.Atoi(val)
if err != nil {
return err
}
ports[port] = struct{}{}
}
case 2:
// We are parsing a range
start, err := strconv.Atoi(rangeParts[0])
if err != nil {
return err
}
end, err := strconv.Atoi(rangeParts[1])
if err != nil {
return err
}
if end < start {
return fmt.Errorf("invalid range: starting value (%v) less than ending (%v) value", end, start)
}
for i := start; i <= end; i++ {
ports[i] = struct{}{}
}
default:
return fmt.Errorf("can only parse single port numbers or port ranges (ex. 80,100-120,150)")
}
}
for port := range ports {
r.ParsedReservedPorts = append(r.ParsedReservedPorts, port)
}
sort.Ints(r.ParsedReservedPorts)
return nil
}
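// An illustrative sketch (not part of the original source) of how
// ParseReserved expands a reserved_ports specification; the helper name is
// hypothetical.
func exampleParseReserved() ([]int, error) {
	r := &Resources{ReservedPorts: "80,120-122,160"}
	if err := r.ParseReserved(); err != nil {
		return nil, err
	}
	// r.ParsedReservedPorts is now []int{80, 120, 121, 122, 160}
	return r.ParsedReservedPorts, nil
}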
// DevConfig is a Config that is used for dev mode of Nomad.
func DevConfig() *Config {
conf := DefaultConfig()
conf.LogLevel = "DEBUG"
conf.Client.Enabled = true
conf.Server.Enabled = true
conf.DevMode = true
conf.EnableDebug = true
conf.DisableAnonymousSignature = true
conf.Consul.AutoAdvertise = true
if runtime.GOOS == "darwin" {
conf.Client.NetworkInterface = "lo0"
} else if runtime.GOOS == "linux" {
conf.Client.NetworkInterface = "lo"
}
conf.Client.Options = map[string]string{
"driver.raw_exec.enable": "true",
}
return conf
}
// DefaultConfig is the baseline configuration for Nomad
func DefaultConfig() *Config {
return &Config{
LogLevel: "INFO",
Region: "global",
Datacenter: "dc1",
BindAddr: "127.0.0.1",
Ports: &Ports{
HTTP: 4646,
RPC: 4647,
Serf: 4648,
},
Addresses: &Addresses{},
AdvertiseAddrs: &AdvertiseAddrs{},
Atlas: &AtlasConfig{},
Consul: config.DefaultConsulConfig(),
Client: &ClientConfig{
Enabled: false,
NetworkSpeed: 100,
MaxKillTimeout: "30s",
ClientMinPort: 14000,
ClientMaxPort: 14512,
Reserved: &Resources{},
},
Server: &ServerConfig{
Enabled: false,
StartJoin: []string{},
RetryJoin: []string{},
RetryInterval: "30s",
RetryMaxAttempts: 0,
},
SyslogFacility: "LOCAL0",
Telemetry: &Telemetry{
CollectionInterval: "1s",
collectionInterval: 1 * time.Second,
},
}
}
// Listener can be used to get a new listener using a custom bind address.
// If the provided bind address is empty, the BindAddr is used instead.
func (c *Config) Listener(proto, addr string, port int) (net.Listener, error) {
if addr == "" {
addr = c.BindAddr
}
// Do our own range check to avoid bugs in package net.
//
// golang.org/issue/11715
// golang.org/issue/13447
//
// Both of the above bugs were fixed by golang.org/cl/12447 which will be
// included in Go 1.6. The error returned below is the same as what Go 1.6
// will return.
if 0 > port || port > 65535 {
return nil, &net.OpError{
Op: "listen",
Net: proto,
Err: &net.AddrError{Err: "invalid port", Addr: fmt.Sprint(port)},
}
}
return net.Listen(proto, fmt.Sprintf("%s:%d", addr, port))
}
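// An illustrative sketch (not part of the original source): an empty address
// makes Listener fall back to the configured BindAddr; the helper name is
// hypothetical.
func exampleHTTPListener(c *Config) (net.Listener, error) {
	return c.Listener("tcp", "", c.Ports.HTTP)
}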
// Merge merges two configurations.
func (c *Config) Merge(b *Config) *Config {
result := *c
if b.Region != "" {
result.Region = b.Region
}
if b.Datacenter != "" {
result.Datacenter = b.Datacenter
}
if b.NodeName != "" {
result.NodeName = b.NodeName
}
if b.DataDir != "" {
result.DataDir = b.DataDir
}
if b.LogLevel != "" {
result.LogLevel = b.LogLevel
}
if b.BindAddr != "" {
result.BindAddr = b.BindAddr
}
if b.EnableDebug {
result.EnableDebug = true
}
if b.LeaveOnInt {
result.LeaveOnInt = true
}
if b.LeaveOnTerm {
result.LeaveOnTerm = true
}
if b.EnableSyslog {
result.EnableSyslog = true
}
if b.SyslogFacility != "" {
result.SyslogFacility = b.SyslogFacility
}
if b.DisableUpdateCheck {
result.DisableUpdateCheck = true
}
if b.DisableAnonymousSignature {
result.DisableAnonymousSignature = true
}
// Apply the telemetry config
if result.Telemetry == nil && b.Telemetry != nil {
telemetry := *b.Telemetry
result.Telemetry = &telemetry
} else if b.Telemetry != nil {
result.Telemetry = result.Telemetry.Merge(b.Telemetry)
}
// Apply the client config
if result.Client == nil && b.Client != nil {
client := *b.Client
result.Client = &client
} else if b.Client != nil {
result.Client = result.Client.Merge(b.Client)
}
// Apply the server config
if result.Server == nil && b.Server != nil {
server := *b.Server
result.Server = &server
} else if b.Server != nil {
result.Server = result.Server.Merge(b.Server)
}
// Apply the ports config
if result.Ports == nil && b.Ports != nil {
ports := *b.Ports
result.Ports = &ports
} else if b.Ports != nil {
result.Ports = result.Ports.Merge(b.Ports)
}
// Apply the address config
if result.Addresses == nil && b.Addresses != nil {
addrs := *b.Addresses
result.Addresses = &addrs
} else if b.Addresses != nil {
result.Addresses = result.Addresses.Merge(b.Addresses)
}
// Apply the advertise addrs config
if result.AdvertiseAddrs == nil && b.AdvertiseAddrs != nil {
advertise := *b.AdvertiseAddrs
result.AdvertiseAddrs = &advertise
} else if b.AdvertiseAddrs != nil {
result.AdvertiseAddrs = result.AdvertiseAddrs.Merge(b.AdvertiseAddrs)
}
// Apply the Atlas configuration
if result.Atlas == nil && b.Atlas != nil {
atlasConfig := *b.Atlas
result.Atlas = &atlasConfig
} else if b.Atlas != nil {
result.Atlas = result.Atlas.Merge(b.Atlas)
}
// Apply the Consul Configuration
if result.Consul == nil && b.Consul != nil {
consulConfig := *b.Consul
result.Consul = &consulConfig
} else if b.Consul != nil {
result.Consul = result.Consul.Merge(b.Consul)
}
// Merge config files lists
result.Files = append(result.Files, b.Files...)
// Add the http API response header map values
if result.HTTPAPIResponseHeaders == nil {
result.HTTPAPIResponseHeaders = make(map[string]string)
}
for k, v := range b.HTTPAPIResponseHeaders {
result.HTTPAPIResponseHeaders[k] = v
}
return &result
}
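// An illustrative sketch (not part of the original source): fields set in the
// argument override the receiver, so when LoadConfigDir merges files in
// alphabetical order the later file wins. Names and values are hypothetical.
func exampleMergePrecedence() *Config {
	base := DefaultConfig()
	override := &Config{Region: "eu-west", Datacenter: "dc2"}
	merged := base.Merge(override)
	// merged.Region == "eu-west", merged.Datacenter == "dc2"; every other
	// field keeps the default from base.
	return merged
}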
// Merge is used to merge two server configs together
func (a *ServerConfig) Merge(b *ServerConfig) *ServerConfig {
result := *a
if b.Enabled {
result.Enabled = true
}
if b.BootstrapExpect > 0 {
result.BootstrapExpect = b.BootstrapExpect
}
if b.DataDir != "" {
result.DataDir = b.DataDir
}
if b.ProtocolVersion != 0 {
result.ProtocolVersion = b.ProtocolVersion
}
if b.NumSchedulers != 0 {
result.NumSchedulers = b.NumSchedulers
}
if b.NodeGCThreshold != "" {
result.NodeGCThreshold = b.NodeGCThreshold
}
if b.HeartbeatGrace != "" {
result.HeartbeatGrace = b.HeartbeatGrace
}
if b.RetryMaxAttempts != 0 {
result.RetryMaxAttempts = b.RetryMaxAttempts
}
if b.RetryInterval != "" {
result.RetryInterval = b.RetryInterval
result.retryInterval = b.retryInterval
}
if b.RejoinAfterLeave {
result.RejoinAfterLeave = true
}
// Add the schedulers
result.EnabledSchedulers = append(result.EnabledSchedulers, b.EnabledSchedulers...)
// Copy the start join addresses
result.StartJoin = make([]string, 0, len(a.StartJoin)+len(b.StartJoin))
result.StartJoin = append(result.StartJoin, a.StartJoin...)
result.StartJoin = append(result.StartJoin, b.StartJoin...)
// Copy the retry join addresses
result.RetryJoin = make([]string, 0, len(a.RetryJoin)+len(b.RetryJoin))
result.RetryJoin = append(result.RetryJoin, a.RetryJoin...)
result.RetryJoin = append(result.RetryJoin, b.RetryJoin...)
return &result
}
// Merge is used to merge two client configs together
func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
result := *a
if b.Enabled {
result.Enabled = true
}
if b.StateDir != "" {
result.StateDir = b.StateDir
}
if b.AllocDir != "" {
result.AllocDir = b.AllocDir
}
if b.NodeClass != "" {
result.NodeClass = b.NodeClass
}
if b.NetworkInterface != "" {
result.NetworkInterface = b.NetworkInterface
}
if b.NetworkSpeed != 0 {
result.NetworkSpeed = b.NetworkSpeed
}
if b.MaxKillTimeout != "" {
result.MaxKillTimeout = b.MaxKillTimeout
}
if b.ClientMaxPort != 0 {
result.ClientMaxPort = b.ClientMaxPort
}
if b.ClientMinPort != 0 {
result.ClientMinPort = b.ClientMinPort
}
if b.Reserved != nil {
result.Reserved = result.Reserved.Merge(b.Reserved)
}
// Add the servers
result.Servers = append(result.Servers, b.Servers...)
// Add the options map values
if result.Options == nil {
result.Options = make(map[string]string)
}
for k, v := range b.Options {
result.Options[k] = v
}
// Add the meta map values
if result.Meta == nil {
result.Meta = make(map[string]string)
}
for k, v := range b.Meta {
result.Meta[k] = v
}
// Add the chroot_env map values
if result.ChrootEnv == nil {
result.ChrootEnv = make(map[string]string)
}
for k, v := range b.ChrootEnv {
result.ChrootEnv[k] = v
}
return &result
}
// Merge is used to merge two telemetry configs together
func (a *Telemetry) Merge(b *Telemetry) *Telemetry {
result := *a
if b.StatsiteAddr != "" {
result.StatsiteAddr = b.StatsiteAddr
}
if b.StatsdAddr != "" {
result.StatsdAddr = b.StatsdAddr
}
if b.DisableHostname {
result.DisableHostname = true
}
if b.CollectionInterval != "" {
result.CollectionInterval = b.CollectionInterval
}
if b.collectionInterval != 0 {
result.collectionInterval = b.collectionInterval
}
if b.CirconusAPIToken != "" {
result.CirconusAPIToken = b.CirconusAPIToken
}
if b.CirconusAPIApp != "" {
result.CirconusAPIApp = b.CirconusAPIApp
}
if b.CirconusAPIURL != "" {
result.CirconusAPIURL = b.CirconusAPIURL
}
if b.CirconusCheckSubmissionURL != "" {
result.CirconusCheckSubmissionURL = b.CirconusCheckSubmissionURL
}
if b.CirconusSubmissionInterval != "" {
result.CirconusSubmissionInterval = b.CirconusSubmissionInterval
}
if b.CirconusCheckID != "" {
result.CirconusCheckID = b.CirconusCheckID
}
if b.CirconusCheckForceMetricActivation != "" {
result.CirconusCheckForceMetricActivation = b.CirconusCheckForceMetricActivation
}
if b.CirconusCheckInstanceID != "" {
result.CirconusCheckInstanceID = b.CirconusCheckInstanceID
}
if b.CirconusCheckSearchTag != "" {
result.CirconusCheckSearchTag = b.CirconusCheckSearchTag
}
if b.CirconusBrokerID != "" {
result.CirconusBrokerID = b.CirconusBrokerID
}
if b.CirconusBrokerSelectTag != "" {
result.CirconusBrokerSelectTag = b.CirconusBrokerSelectTag
}
return &result
}
// Merge is used to merge two port configurations.
func (a *Ports) Merge(b *Ports) *Ports {
result := *a
if b.HTTP != 0 {
result.HTTP = b.HTTP
}
if b.RPC != 0 {
result.RPC = b.RPC
}
if b.Serf != 0 {
result.Serf = b.Serf
}
return &result
}
// Merge is used to merge two address configs together.
func (a *Addresses) Merge(b *Addresses) *Addresses {
result := *a
if b.HTTP != "" {
result.HTTP = b.HTTP
}
if b.RPC != "" {
result.RPC = b.RPC
}
if b.Serf != "" {
result.Serf = b.Serf
}
return &result
}
// Merge merges two advertise addrs configs together.
func (a *AdvertiseAddrs) Merge(b *AdvertiseAddrs) *AdvertiseAddrs {
result := *a
if b.RPC != "" {
result.RPC = b.RPC
}
if b.Serf != "" {
result.Serf = b.Serf
}
if b.HTTP != "" {
result.HTTP = b.HTTP
}
return &result
}
// Merge merges two Atlas configurations together.
func (a *AtlasConfig) Merge(b *AtlasConfig) *AtlasConfig {
result := *a
if b.Infrastructure != "" {
result.Infrastructure = b.Infrastructure
}
if b.Token != "" {
result.Token = b.Token
}
if b.Join {
result.Join = true
}
if b.Endpoint != "" {
result.Endpoint = b.Endpoint
}
return &result
}
func (r *Resources) Merge(b *Resources) *Resources {
result := *r
if b.CPU != 0 {
result.CPU = b.CPU
}
if b.MemoryMB != 0 {
result.MemoryMB = b.MemoryMB
}
if b.DiskMB != 0 {
result.DiskMB = b.DiskMB
}
if b.IOPS != 0 {
result.IOPS = b.IOPS
}
if b.ReservedPorts != "" {
result.ReservedPorts = b.ReservedPorts
}
if len(b.ParsedReservedPorts) != 0 {
result.ParsedReservedPorts = b.ParsedReservedPorts
}
return &result
}
// LoadConfig loads the configuration at the given path, regardless of
// whether it is a file or a directory.
func LoadConfig(path string) (*Config, error) {
fi, err := os.Stat(path)
if err != nil {
return nil, err
}
if fi.IsDir() {
return LoadConfigDir(path)
}
cleaned := filepath.Clean(path)
config, err := ParseConfigFile(cleaned)
if err != nil {
return nil, fmt.Errorf("Error loading %s: %s", cleaned, err)
}
config.Files = append(config.Files, cleaned)
return config, nil
}
// LoadConfigDir loads all the configurations in the given directory
// in alphabetical order.
func LoadConfigDir(dir string) (*Config, error) {
f, err := os.Open(dir)
if err != nil {
return nil, err
}
defer f.Close()
fi, err := f.Stat()
if err != nil {
return nil, err
}
if !fi.IsDir() {
return nil, fmt.Errorf(
"configuration path must be a directory: %s", dir)
}
var files []string
err = nil
for err != io.EOF {
var fis []os.FileInfo
fis, err = f.Readdir(128)
if err != nil && err != io.EOF {
return nil, err
}
for _, fi := range fis {
// Ignore directories
if fi.IsDir() {
continue
}
// Only care about files that are valid to load.
name := fi.Name()
skip := true
if strings.HasSuffix(name, ".hcl") {
skip = false
} else if strings.HasSuffix(name, ".json") {
skip = false
}
if skip || isTemporaryFile(name) {
continue
}
path := filepath.Join(dir, name)
files = append(files, path)
}
}
// Fast-path if we have no files
if len(files) == 0 {
return &Config{}, nil
}
sort.Strings(files)
var result *Config
for _, f := range files {
config, err := ParseConfigFile(f)
if err != nil {
return nil, fmt.Errorf("Error loading %s: %s", f, err)
}
config.Files = append(config.Files, f)
if result == nil {
result = config
} else {
result = result.Merge(config)
}
}
return result, nil
}
// isTemporaryFile returns true if the provided file name looks like a
// temporary file created by one of the following editors: emacs or vim.
func isTemporaryFile(name string) bool {
return strings.HasSuffix(name, "~") || // vim
strings.HasPrefix(name, ".#") || // emacs
(strings.HasPrefix(name, "#") && strings.HasSuffix(name, "#")) // emacs
}
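// An illustrative sketch (not part of the original source): file names that
// isTemporaryFile causes LoadConfigDir to skip.
var exampleTemporaryNames = []string{
	"server.hcl~",  // vim backup
	".#client.hcl", // emacs lock file
	"#agent.json#", // emacs auto-save
}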

View File

@ -1,662 +0,0 @@
package agent
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"time"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/mitchellh/mapstructure"
)
// ParseConfigFile parses the given path as a config file.
func ParseConfigFile(path string) (*Config, error) {
path, err := filepath.Abs(path)
if err != nil {
return nil, err
}
f, err := os.Open(path)
if err != nil {
return nil, err
}
defer f.Close()
config, err := ParseConfig(f)
if err != nil {
return nil, err
}
return config, nil
}
// ParseConfig parses the config from the given io.Reader.
//
// Due to current internal limitations, the entire contents of the
// io.Reader will be copied into memory first before parsing.
func ParseConfig(r io.Reader) (*Config, error) {
// Copy the reader into an in-memory buffer first since HCL requires it.
var buf bytes.Buffer
if _, err := io.Copy(&buf, r); err != nil {
return nil, err
}
// Parse the buffer
root, err := hcl.Parse(buf.String())
if err != nil {
return nil, fmt.Errorf("error parsing: %s", err)
}
buf.Reset()
// Top-level item should be a list
list, ok := root.Node.(*ast.ObjectList)
if !ok {
return nil, fmt.Errorf("error parsing: root should be an object")
}
var config Config
if err := parseConfig(&config, list); err != nil {
return nil, fmt.Errorf("error parsing 'config': %v", err)
}
return &config, nil
}
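// An illustrative sketch (not part of the original source): parsing a minimal
// HCL configuration from an in-memory buffer; the keys shown are drawn from
// the valid list below and the values are hypothetical.
func exampleParseConfig() (*Config, error) {
	src := `
region     = "global"
datacenter = "dc1"
ports {
  http = 4646
}
`
	return ParseConfig(bytes.NewBufferString(src))
}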
func parseConfig(result *Config, list *ast.ObjectList) error {
// Check for invalid keys
valid := []string{
"region",
"datacenter",
"name",
"data_dir",
"log_level",
"bind_addr",
"enable_debug",
"ports",
"addresses",
"interfaces",
"advertise",
"client",
"server",
"telemetry",
"leave_on_interrupt",
"leave_on_terminate",
"enable_syslog",
"syslog_facility",
"disable_update_check",
"disable_anonymous_signature",
"atlas",
"consul",
"http_api_response_headers",
}
if err := checkHCLKeys(list, valid); err != nil {
return multierror.Prefix(err, "config:")
}
// Decode the full thing into a map[string]interface for ease
var m map[string]interface{}
if err := hcl.DecodeObject(&m, list); err != nil {
return err
}
delete(m, "ports")
delete(m, "addresses")
delete(m, "interfaces")
delete(m, "advertise")
delete(m, "client")
delete(m, "server")
delete(m, "telemetry")
delete(m, "atlas")
delete(m, "consul")
delete(m, "http_api_response_headers")
// Decode the rest
if err := mapstructure.WeakDecode(m, result); err != nil {
return err
}
// Parse ports
if o := list.Filter("ports"); len(o.Items) > 0 {
if err := parsePorts(&result.Ports, o); err != nil {
return multierror.Prefix(err, "ports ->")
}
}
// Parse addresses
if o := list.Filter("addresses"); len(o.Items) > 0 {
if err := parseAddresses(&result.Addresses, o); err != nil {
return multierror.Prefix(err, "addresses ->")
}
}
// Parse advertise
if o := list.Filter("advertise"); len(o.Items) > 0 {
if err := parseAdvertise(&result.AdvertiseAddrs, o); err != nil {
return multierror.Prefix(err, "advertise ->")
}
}
// Parse client config
if o := list.Filter("client"); len(o.Items) > 0 {
if err := parseClient(&result.Client, o); err != nil {
return multierror.Prefix(err, "client ->")
}
}
// Parse server config
if o := list.Filter("server"); len(o.Items) > 0 {
if err := parseServer(&result.Server, o); err != nil {
return multierror.Prefix(err, "server ->")
}
}
// Parse telemetry config
if o := list.Filter("telemetry"); len(o.Items) > 0 {
if err := parseTelemetry(&result.Telemetry, o); err != nil {
return multierror.Prefix(err, "telemetry ->")
}
}
// Parse atlas config
if o := list.Filter("atlas"); len(o.Items) > 0 {
if err := parseAtlas(&result.Atlas, o); err != nil {
return multierror.Prefix(err, "atlas ->")
}
}
// Parse the consul config
if o := list.Filter("consul"); len(o.Items) > 0 {
if err := parseConsulConfig(&result.Consul, o); err != nil {
return multierror.Prefix(err, "consul ->")
}
}
// Parse out http_api_response_headers fields. These are in HCL as a list so
// we need to iterate over them and merge them.
if headersO := list.Filter("http_api_response_headers"); len(headersO.Items) > 0 {
for _, o := range headersO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, &result.HTTPAPIResponseHeaders); err != nil {
return err
}
}
}
return nil
}
func parsePorts(result **Ports, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'ports' block allowed")
}
// Get our ports object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"http",
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var ports Ports
if err := mapstructure.WeakDecode(m, &ports); err != nil {
return err
}
*result = &ports
return nil
}
func parseAddresses(result **Addresses, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'addresses' block allowed")
}
// Get our addresses object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"http",
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var addresses Addresses
if err := mapstructure.WeakDecode(m, &addresses); err != nil {
return err
}
*result = &addresses
return nil
}
func parseAdvertise(result **AdvertiseAddrs, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'advertise' block allowed")
}
// Get our advertise object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"http",
"rpc",
"serf",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var advertise AdvertiseAddrs
if err := mapstructure.WeakDecode(m, &advertise); err != nil {
return err
}
*result = &advertise
return nil
}
func parseClient(result **ClientConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'client' block allowed")
}
// Get our client object
obj := list.Items[0]
// Value should be an object
var listVal *ast.ObjectList
if ot, ok := obj.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("client value: should be an object")
}
// Check for invalid keys
valid := []string{
"enabled",
"state_dir",
"alloc_dir",
"servers",
"node_class",
"options",
"meta",
"chroot_env",
"network_interface",
"network_speed",
"max_kill_timeout",
"client_max_port",
"client_min_port",
"reserved",
"stats",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
delete(m, "options")
delete(m, "meta")
delete(m, "chroot_env")
delete(m, "reserved")
delete(m, "stats")
var config ClientConfig
if err := mapstructure.WeakDecode(m, &config); err != nil {
return err
}
// Parse out options fields. These are in HCL as a list so we need to
// iterate over them and merge them.
if optionsO := listVal.Filter("options"); len(optionsO.Items) > 0 {
for _, o := range optionsO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, &config.Options); err != nil {
return err
}
}
}
// Parse out meta fields. These are in HCL as a list so we need to
// iterate over them and merge them.
if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
for _, o := range metaO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, &config.Meta); err != nil {
return err
}
}
}
// Parse out chroot_env fields. These are in HCL as a list so we need to
// iterate over them and merge them.
if chrootEnvO := listVal.Filter("chroot_env"); len(chrootEnvO.Items) > 0 {
for _, o := range chrootEnvO.Elem().Items {
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, &config.ChrootEnv); err != nil {
return err
}
}
}
// Parse reserved config
if o := listVal.Filter("reserved"); len(o.Items) > 0 {
if err := parseReserved(&config.Reserved, o); err != nil {
return multierror.Prefix(err, "reserved ->")
}
}
*result = &config
return nil
}
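// An illustrative sketch (not part of the original source): a client block as
// parseClient would accept it, exercising the map-valued fields that are
// merged item by item. All values are hypothetical.
//
//	client {
//	  enabled = true
//	  options {
//	    "driver.raw_exec.enable" = "true"
//	  }
//	  meta {
//	    rack = "r1"
//	  }
//	  reserved {
//	    cpu            = 500
//	    reserved_ports = "22,80,8500-8600"
//	  }
//	}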
func parseReserved(result **Resources, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'reserved' block allowed")
}
// Get our reserved object
obj := list.Items[0]
// Value should be an object
var listVal *ast.ObjectList
if ot, ok := obj.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("client value: should be an object")
}
// Check for invalid keys
valid := []string{
"cpu",
"memory",
"disk",
"iops",
"reserved_ports",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var reserved Resources
if err := mapstructure.WeakDecode(m, &reserved); err != nil {
return err
}
if err := reserved.ParseReserved(); err != nil {
return err
}
*result = &reserved
return nil
}
func parseServer(result **ServerConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'server' block allowed")
}
// Get our server object
obj := list.Items[0]
// Value should be an object
var listVal *ast.ObjectList
if ot, ok := obj.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("client value: should be an object")
}
// Check for invalid keys
valid := []string{
"enabled",
"bootstrap_expect",
"data_dir",
"protocol_version",
"num_schedulers",
"enabled_schedulers",
"node_gc_threshold",
"heartbeat_grace",
"start_join",
"retry_join",
"retry_max",
"retry_interval",
"rejoin_after_leave",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var config ServerConfig
if err := mapstructure.WeakDecode(m, &config); err != nil {
return err
}
*result = &config
return nil
}
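// An illustrative sketch (not part of the original source): a server block as
// parseServer would accept it, using only keys from the valid list above.
// All values are hypothetical.
//
//	server {
//	  enabled          = true
//	  bootstrap_expect = 3
//	  retry_join       = ["10.0.0.2", "10.0.0.3"]
//	  retry_interval   = "15s"
//	}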
func parseTelemetry(result **Telemetry, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'telemetry' block allowed")
}
// Get our telemetry object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"statsite_address",
"statsd_address",
"disable_hostname",
"collection_interval",
"publish_allocation_metrics",
"publish_node_metrics",
"circonus_api_token",
"circonus_api_app",
"circonus_api_url",
"circonus_submission_interval",
"circonus_submission_url",
"circonus_check_id",
"circonus_check_force_metric_activation",
"circonus_check_instance_id",
"circonus_check_search_tag",
"circonus_broker_id",
"circonus_broker_select_tag",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var telemetry Telemetry
if err := mapstructure.WeakDecode(m, &telemetry); err != nil {
return err
}
if telemetry.CollectionInterval != "" {
if dur, err := time.ParseDuration(telemetry.CollectionInterval); err != nil {
return fmt.Errorf("error parsing value of %q: %v", "collection_interval", err)
} else {
telemetry.collectionInterval = dur
}
}
*result = &telemetry
return nil
}
func parseAtlas(result **AtlasConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'atlas' block allowed")
}
// Get our atlas object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"infrastructure",
"token",
"join",
"endpoint",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
var atlas AtlasConfig
if err := mapstructure.WeakDecode(m, &atlas); err != nil {
return err
}
*result = &atlas
return nil
}
func parseConsulConfig(result **config.ConsulConfig, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'consul' block allowed")
}
// Get our Consul object
listVal := list.Items[0].Val
// Check for invalid keys
valid := []string{
"address",
"auth",
"auto_advertise",
"ca_file",
"cert_file",
"client_auto_join",
"client_service_name",
"key_file",
"server_auto_join",
"server_service_name",
"ssl",
"timeout",
"token",
"verify_ssl",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, listVal); err != nil {
return err
}
consulConfig := config.DefaultConsulConfig()
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: &consulConfig,
})
if err != nil {
return err
}
if err := dec.Decode(m); err != nil {
return err
}
*result = consulConfig
return nil
}
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {
case *ast.ObjectList:
list = n
case *ast.ObjectType:
list = n.List
default:
return fmt.Errorf("cannot check HCL keys of type %T", n)
}
validMap := make(map[string]struct{}, len(valid))
for _, v := range valid {
validMap[v] = struct{}{}
}
var result error
for _, item := range list.Items {
key := item.Keys[0].Token.Value().(string)
if _, ok := validMap[key]; !ok {
result = multierror.Append(result, fmt.Errorf(
"invalid key: %s", key))
}
}
return result
}

View File

@ -1,84 +0,0 @@
package consul
import (
"log"
"sync"
"time"
"github.com/hashicorp/consul/lib"
cstructs "github.com/hashicorp/nomad/client/driver/structs"
)
// CheckRunner runs a given check at a specific interval and updates the
// corresponding Consul TTL check
type CheckRunner struct {
check Check
runCheck func(Check)
logger *log.Logger
stop bool
stopCh chan struct{}
stopLock sync.Mutex
started bool
startedLock sync.Mutex
}
// NewCheckRunner configures and returns a CheckRunner
func NewCheckRunner(check Check, runCheck func(Check), logger *log.Logger) *CheckRunner {
cr := CheckRunner{
check: check,
runCheck: runCheck,
logger: logger,
stopCh: make(chan struct{}),
}
return &cr
}
// Start is used to start the check. The check runs until stop is called
func (r *CheckRunner) Start() {
r.startedLock.Lock()
defer r.startedLock.Unlock()
if r.started {
return
}
r.stopLock.Lock()
defer r.stopLock.Unlock()
go r.run()
r.started = true
}
// Stop is used to stop the check.
func (r *CheckRunner) Stop() {
r.stopLock.Lock()
defer r.stopLock.Unlock()
if !r.stop {
r.stop = true
close(r.stopCh)
}
}
// run is invoked by a goroutine to run until Stop() is called
func (r *CheckRunner) run() {
// Get the randomized initial pause time
initialPauseTime := lib.RandomStagger(r.check.Interval())
r.logger.Printf("[DEBUG] agent: pausing %v before first invocation of %s", initialPauseTime, r.check.ID())
next := time.NewTimer(initialPauseTime)
for {
select {
case <-next.C:
r.runCheck(r.check)
next.Reset(r.check.Interval())
case <-r.stopCh:
next.Stop()
return
}
}
}
// Check is an interface which check providers can implement for Nomad to run
type Check interface {
Run() *cstructs.CheckResult
ID() string
Interval() time.Duration
Timeout() time.Duration
}
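// An illustrative sketch (not part of the original source): a minimal Check
// implementation that a CheckRunner could drive. The type and its behavior
// are hypothetical; only ExitCode and Output are populated, two of the fields
// runCheck inspects when updating the TTL check.
type exampleStaticCheck struct {
	id       string
	interval time.Duration
	timeout  time.Duration
}

func (e *exampleStaticCheck) ID() string              { return e.id }
func (e *exampleStaticCheck) Interval() time.Duration { return e.interval }
func (e *exampleStaticCheck) Timeout() time.Duration  { return e.timeout }
func (e *exampleStaticCheck) Run() *cstructs.CheckResult {
	// A real check would do work here; this sketch always reports passing.
	return &cstructs.CheckResult{ExitCode: 0, Output: "ok"}
}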

View File

@ -1,983 +0,0 @@
// Package consul is used by Nomad to register all services, both static
// services and dynamic services created via allocations.
//
// Consul Service IDs have the following format: ${nomadServicePrefix}-${groupName}-${serviceKey}
// groupName takes on one of the following values:
// - server
// - client
// - executor-${alloc-id}-${task-name}
//
// serviceKey should be generated by service registrators.
// If the serviceKey is being generated by the executor for a Nomad Task.Services
// the following helper should be used:
// NOTE: Executor should interpolate the service prior to calling
// func GenerateTaskServiceKey(service *structs.Service) string
//
// The Nomad Client reaps services registered from dead allocations that were
// not properly cleaned up by the executor (this is not the expected case).
//
// TODO fix this comment
// The Consul ServiceIDs generated by the executor will contain the allocation
// ID. Thus the client can generate the list of Consul ServiceIDs to keep by
// calling the following method on all running allocations the client is aware
// of:
// func GenerateExecutorServiceKeyPrefixFromAlloc(allocID string) string
package consul
import (
"fmt"
"log"
"net"
"net/url"
"strconv"
"strings"
"sync"
"time"
consul "github.com/hashicorp/consul/api"
"github.com/hashicorp/consul/lib"
"github.com/hashicorp/go-multierror"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/nomad/structs/config"
"github.com/hashicorp/nomad/nomad/types"
)
const (
// initialSyncBuffer is the max time an initial sync will sleep
// before syncing.
initialSyncBuffer = 30 * time.Second
// initialSyncDelay is the delay before an initial sync.
initialSyncDelay = 5 * time.Second
// nomadServicePrefix is the first prefix that scopes all Nomad registered
// services
nomadServicePrefix = "_nomad"
// The periodic time interval for syncing services and checks with Consul
syncInterval = 5 * time.Second
// syncJitter is the divisor applied to syncInterval to compute the maximum
// jitter: each periodic sync fires up to syncInterval/syncJitter early.
syncJitter = 8
// ttlCheckBuffer is the time interval that Nomad can take to report the
// check result to Consul
ttlCheckBuffer = 31 * time.Second
// DefaultQueryWaitDuration is the max duration the Consul Agent will
// spend waiting for a response from a Consul Query.
DefaultQueryWaitDuration = 2 * time.Second
// ServiceTagHTTP is the tag assigned to HTTP services
ServiceTagHTTP = "http"
// ServiceTagRPC is the tag assigned to RPC services
ServiceTagRPC = "rpc"
// ServiceTagSerf is the tag assigned to Serf services
ServiceTagSerf = "serf"
)
// consulServiceID and consulCheckID are the IDs registered with Consul
type consulServiceID string
type consulCheckID string
// ServiceKey is the generated service key that is used to build the Consul
// ServiceID
type ServiceKey string
// ServiceDomain is the domain of services registered by Nomad
type ServiceDomain string
const (
ClientDomain ServiceDomain = "client"
ServerDomain ServiceDomain = "server"
)
// NewExecutorDomain returns a domain specific to the alloc ID and task
func NewExecutorDomain(allocID, task string) ServiceDomain {
return ServiceDomain(fmt.Sprintf("executor-%s-%s", allocID, task))
}
// Syncer allows syncing of services and checks with Consul
type Syncer struct {
client *consul.Client
consulAvailable bool
// servicesGroups and checkGroups are named groups of services and checks
// respectively that will be flattened and reconciled with Consul when
// SyncServices() is called. The key to the servicesGroups map is unique
// per handler and is used to allow the Agent's services to be maintained
// independently of the Client or Server's services.
servicesGroups map[ServiceDomain]map[ServiceKey]*consul.AgentServiceRegistration
checkGroups map[ServiceDomain]map[ServiceKey][]*consul.AgentCheckRegistration
groupsLock sync.RWMutex
// The "Consul Registry" is a collection of Consul Services and
// Checks all guarded by the registryLock.
registryLock sync.RWMutex
// trackedChecks and trackedServices are registered with consul
trackedChecks map[consulCheckID]*consul.AgentCheckRegistration
trackedServices map[consulServiceID]*consul.AgentServiceRegistration
// checkRunners are delegated Consul checks being run by the Syncer
checkRunners map[consulCheckID]*CheckRunner
addrFinder func(portLabel string) (string, int)
createDelegatedCheck func(*structs.ServiceCheck, string) (Check, error)
delegateChecks map[string]struct{} // delegateChecks are the checks that the Nomad client runs and reports to Consul
// End registryLock guarded attributes.
logger *log.Logger
shutdownCh chan struct{}
shutdown bool
shutdownLock sync.Mutex
// notifyShutdownCh is used to notify a Syncer it needs to shutdown.
// This can happen because there was an explicit call to the Syncer's
// Shutdown() method, or because the calling task signaled the
// program is going to exit by closing its shutdownCh.
notifyShutdownCh chan struct{}
// periodicCallbacks is walked sequentially when the timer in Run
// fires.
periodicCallbacks map[string]types.PeriodicCallback
notifySyncCh chan struct{}
periodicLock sync.RWMutex
}
// NewSyncer returns a new consul.Syncer
func NewSyncer(consulConfig *config.ConsulConfig, shutdownCh chan struct{}, logger *log.Logger) (*Syncer, error) {
var consulClientConfig *consul.Config
var err error
consulClientConfig, err = consulConfig.ApiConfig()
if err != nil {
return nil, err
}
var consulClient *consul.Client
if consulClient, err = consul.NewClient(consulClientConfig); err != nil {
return nil, err
}
consulSyncer := Syncer{
client: consulClient,
logger: logger,
consulAvailable: true,
shutdownCh: shutdownCh,
servicesGroups: make(map[ServiceDomain]map[ServiceKey]*consul.AgentServiceRegistration),
checkGroups: make(map[ServiceDomain]map[ServiceKey][]*consul.AgentCheckRegistration),
trackedServices: make(map[consulServiceID]*consul.AgentServiceRegistration),
trackedChecks: make(map[consulCheckID]*consul.AgentCheckRegistration),
checkRunners: make(map[consulCheckID]*CheckRunner),
periodicCallbacks: make(map[string]types.PeriodicCallback),
}
return &consulSyncer, nil
}
// SetDelegatedChecks sets the checks that nomad is going to run and report the
// result back to consul
func (c *Syncer) SetDelegatedChecks(delegateChecks map[string]struct{}, createDelegatedCheckFn func(*structs.ServiceCheck, string) (Check, error)) *Syncer {
c.delegateChecks = delegateChecks
c.createDelegatedCheck = createDelegatedCheckFn
return c
}
// SetAddrFinder sets a function to find the host and port for a Service given its port label
func (c *Syncer) SetAddrFinder(addrFinder func(string) (string, int)) *Syncer {
c.addrFinder = addrFinder
return c
}
// GenerateServiceKey should be called to generate a serviceKey based on the
// Service.
func GenerateServiceKey(service *structs.Service) ServiceKey {
var key string
numTags := len(service.Tags)
switch numTags {
case 0:
key = fmt.Sprintf("%s", service.Name)
default:
tags := strings.Join(service.Tags, "-")
key = fmt.Sprintf("%s-%s", service.Name, tags)
}
return ServiceKey(key)
}
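// An illustrative sketch (not part of the original source): the key for a
// tagged service joins the name and its tags with hyphens, so the values
// below (hypothetical) produce ServiceKey("web-http-v1").
func exampleGenerateServiceKey() ServiceKey {
	web := &structs.Service{Name: "web", Tags: []string{"http", "v1"}}
	return GenerateServiceKey(web)
}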
// SetServices stores the map of Nomad Services to the provided service
// domain name.
func (c *Syncer) SetServices(domain ServiceDomain, services map[ServiceKey]*structs.Service) error {
var mErr multierror.Error
numServ := len(services)
registeredServices := make(map[ServiceKey]*consul.AgentServiceRegistration, numServ)
registeredChecks := make(map[ServiceKey][]*consul.AgentCheckRegistration, numServ)
for serviceKey, service := range services {
serviceReg, err := c.createService(service, domain, serviceKey)
if err != nil {
mErr.Errors = append(mErr.Errors, err)
continue
}
registeredServices[serviceKey] = serviceReg
// Register the check(s) for this service
for _, chk := range service.Checks {
// Create a Consul check registration
chkReg, err := c.createCheckReg(chk, serviceReg)
if err != nil {
mErr.Errors = append(mErr.Errors, err)
continue
}
// Create a Nomad check if we have to handle this particular check type
c.registryLock.RLock()
if _, ok := c.delegateChecks[chk.Type]; ok {
_, ok := c.checkRunners[consulCheckID(chkReg.ID)]
c.registryLock.RUnlock()
if ok {
continue
}
nc, err := c.createDelegatedCheck(chk, chkReg.ID)
if err != nil {
mErr.Errors = append(mErr.Errors, err)
continue
}
cr := NewCheckRunner(nc, c.runCheck, c.logger)
c.registryLock.Lock()
// TODO type the CheckRunner
c.checkRunners[consulCheckID(nc.ID())] = cr
c.registryLock.Unlock()
} else {
c.registryLock.RUnlock()
}
registeredChecks[serviceKey] = append(registeredChecks[serviceKey], chkReg)
}
}
if len(mErr.Errors) > 0 {
return mErr.ErrorOrNil()
}
c.groupsLock.Lock()
for serviceKey, service := range registeredServices {
serviceKeys, ok := c.servicesGroups[domain]
if !ok {
serviceKeys = make(map[ServiceKey]*consul.AgentServiceRegistration, len(registeredServices))
c.servicesGroups[domain] = serviceKeys
}
serviceKeys[serviceKey] = service
}
for serviceKey, checks := range registeredChecks {
serviceKeys, ok := c.checkGroups[domain]
if !ok {
serviceKeys = make(map[ServiceKey][]*consul.AgentCheckRegistration, len(registeredChecks))
c.checkGroups[domain] = serviceKeys
}
serviceKeys[serviceKey] = checks
}
c.groupsLock.Unlock()
// Sync immediately
c.SyncNow()
return nil
}
// SyncNow expires the current timer forcing the list of periodic callbacks
// to be synced immediately.
func (c *Syncer) SyncNow() {
select {
case c.notifySyncCh <- struct{}{}:
default:
}
}
// flattenedServices returns a flattened list of services that are registered
// locally
func (c *Syncer) flattenedServices() []*consul.AgentServiceRegistration {
const initialNumServices = 8
services := make([]*consul.AgentServiceRegistration, 0, initialNumServices)
c.groupsLock.RLock()
defer c.groupsLock.RUnlock()
for _, servicesGroup := range c.servicesGroups {
for _, service := range servicesGroup {
services = append(services, service)
}
}
return services
}
// flattenedChecks returns a flattened list of checks that are registered
// locally
func (c *Syncer) flattenedChecks() []*consul.AgentCheckRegistration {
const initialNumChecks = 8
checks := make([]*consul.AgentCheckRegistration, 0, initialNumChecks)
c.groupsLock.RLock()
for _, checkGroup := range c.checkGroups {
for _, check := range checkGroup {
checks = append(checks, check...)
}
}
c.groupsLock.RUnlock()
return checks
}
func (c *Syncer) signalShutdown() {
select {
case c.notifyShutdownCh <- struct{}{}:
default:
}
}
// Shutdown de-registers the services and checks and shuts down periodic syncing
func (c *Syncer) Shutdown() error {
var mErr multierror.Error
c.shutdownLock.Lock()
if !c.shutdown {
c.shutdown = true
}
c.shutdownLock.Unlock()
c.signalShutdown()
// Stop all the checks that nomad is running
c.registryLock.RLock()
defer c.registryLock.RUnlock()
for _, cr := range c.checkRunners {
cr.Stop()
}
// De-register all the services from Consul
for serviceID := range c.trackedServices {
convertedID := string(serviceID)
if err := c.client.Agent().ServiceDeregister(convertedID); err != nil {
c.logger.Printf("[WARN] consul.syncer: failed to deregister service ID %+q: %v", convertedID, err)
mErr.Errors = append(mErr.Errors, err)
}
}
return mErr.ErrorOrNil()
}
// queryChecks queries the Consul Agent for a list of Consul checks that
// have been registered with this Consul Syncer.
func (c *Syncer) queryChecks() (map[consulCheckID]*consul.AgentCheck, error) {
checks, err := c.client.Agent().Checks()
if err != nil {
return nil, err
}
return c.filterConsulChecks(checks), nil
}
// queryAgentServices queries the Consul Agent for a list of Consul services that
// have been registered with this Consul Syncer.
func (c *Syncer) queryAgentServices() (map[consulServiceID]*consul.AgentService, error) {
services, err := c.client.Agent().Services()
if err != nil {
return nil, err
}
return c.filterConsulServices(services), nil
}
// syncChecks synchronizes this Syncer's Consul Checks with the Consul Agent.
func (c *Syncer) syncChecks() error {
var mErr multierror.Error
consulChecks, err := c.queryChecks()
if err != nil {
return err
}
// Synchronize checks with Consul
missingChecks, _, changedChecks, staleChecks := c.calcChecksDiff(consulChecks)
for _, check := range missingChecks {
if err := c.registerCheck(check); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
c.registryLock.Lock()
c.trackedChecks[consulCheckID(check.ID)] = check
c.registryLock.Unlock()
}
for _, check := range changedChecks {
// NOTE(sean@): Do we need to deregister the check before
// re-registering it? Not deregistering to avoid missing the
// TTL but doesn't correctly reconcile any possible drift with
// the check.
//
// if err := c.deregisterCheck(check.ID); err != nil {
// mErr.Errors = append(mErr.Errors, err)
// }
if err := c.registerCheck(check); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
}
for _, check := range staleChecks {
if err := c.deregisterCheck(consulCheckID(check.ID)); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
c.registryLock.Lock()
delete(c.trackedChecks, consulCheckID(check.ID))
c.registryLock.Unlock()
}
return mErr.ErrorOrNil()
}
// compareConsulCheck takes a consul.AgentCheckRegistration instance and
// compares it with a consul.AgentCheck. Returns true if they are equal
// according to consul.AgentCheck, otherwise false.
func compareConsulCheck(localCheck *consul.AgentCheckRegistration, consulCheck *consul.AgentCheck) bool {
if consulCheck.CheckID != localCheck.ID ||
consulCheck.Name != localCheck.Name ||
consulCheck.Notes != localCheck.Notes ||
consulCheck.ServiceID != localCheck.ServiceID {
return false
}
return true
}
// calcChecksDiff takes the argument (consulChecks) and calculates the delta
// between the consul.Syncer's list of known checks (c.trackedChecks). Four
// slices are returned:
//
// 1) a slice of checks that exist only locally in the Syncer and are missing
// from the Consul Agent (consulChecks) and therefore need to be registered.
//
// 2) a slice of checks that exist in both the local consul.Syncer's
// tracked list and Consul Agent (consulChecks).
//
// 3) a slice of checks that exist in both the local consul.Syncer's
// tracked list and Consul Agent (consulChecks) but have diverged state.
//
// 4) a slice of checks that exist only in the Consul Agent (consulChecks)
// and should be removed because the Consul Agent has drifted from the
// Syncer.
func (c *Syncer) calcChecksDiff(consulChecks map[consulCheckID]*consul.AgentCheck) (
missingChecks []*consul.AgentCheckRegistration,
equalChecks []*consul.AgentCheckRegistration,
changedChecks []*consul.AgentCheckRegistration,
staleChecks []*consul.AgentCheckRegistration) {
type mergedCheck struct {
check *consul.AgentCheckRegistration
// 'l' == Nomad local only
// 'e' == equal
// 'c' == changed
// 'a' == Consul agent only
state byte
}
var (
localChecksCount = 0
equalChecksCount = 0
changedChecksCount = 0
agentChecks = 0
)
c.registryLock.RLock()
localChecks := make(map[string]*mergedCheck, len(c.trackedChecks)+len(consulChecks))
for _, localCheck := range c.flattenedChecks() {
localChecksCount++
localChecks[localCheck.ID] = &mergedCheck{localCheck, 'l'}
}
c.registryLock.RUnlock()
for _, consulCheck := range consulChecks {
if localCheck, found := localChecks[consulCheck.CheckID]; found {
localChecksCount--
if compareConsulCheck(localCheck.check, consulCheck) {
equalChecksCount++
localChecks[consulCheck.CheckID].state = 'e'
} else {
changedChecksCount++
localChecks[consulCheck.CheckID].state = 'c'
}
} else {
agentChecks++
agentCheckReg := &consul.AgentCheckRegistration{
ID: consulCheck.CheckID,
Name: consulCheck.Name,
Notes: consulCheck.Notes,
ServiceID: consulCheck.ServiceID,
}
localChecks[consulCheck.CheckID] = &mergedCheck{agentCheckReg, 'a'}
}
}
missingChecks = make([]*consul.AgentCheckRegistration, 0, localChecksCount)
equalChecks = make([]*consul.AgentCheckRegistration, 0, equalChecksCount)
changedChecks = make([]*consul.AgentCheckRegistration, 0, changedChecksCount)
staleChecks = make([]*consul.AgentCheckRegistration, 0, agentChecks)
for _, check := range localChecks {
switch check.state {
case 'l':
missingChecks = append(missingChecks, check.check)
case 'e':
equalChecks = append(equalChecks, check.check)
case 'c':
changedChecks = append(changedChecks, check.check)
case 'a':
staleChecks = append(staleChecks, check.check)
}
}
return missingChecks, equalChecks, changedChecks, staleChecks
}
// compareConsulService takes a consul.AgentServiceRegistration instance and
// compares it with a consul.AgentService. Returns true if they are equal
// according to consul.AgentService, otherwise false.
func compareConsulService(localService *consul.AgentServiceRegistration, consulService *consul.AgentService) bool {
if consulService.ID != localService.ID ||
consulService.Service != localService.Name ||
consulService.Port != localService.Port ||
consulService.Address != localService.Address ||
consulService.EnableTagOverride != localService.EnableTagOverride {
return false
}
serviceTags := make(map[string]byte, len(localService.Tags))
for _, tag := range localService.Tags {
serviceTags[tag] = 'l'
}
for _, tag := range consulService.Tags {
if _, found := serviceTags[tag]; !found {
return false
}
serviceTags[tag] = 'b'
}
for _, state := range serviceTags {
if state == 'l' {
return false
}
}
return true
}
// calcServicesDiff takes the argument (consulServices) and calculates the
// delta between the consul.Syncer's list of known services
// (c.trackedServices). Four arrays are returned:
//
// 1) a slice of services that exist only locally in the Syncer and are
// missing from the Consul Agent (consulServices) and therefore need to be
// registered.
//
// 2) a slice of services that exist in both the local consul.Syncer's
// tracked list and Consul Agent (consulServices) *AND* are identical.
//
// 3) a slice of services that exist in both the local consul.Syncer's
// tracked list and Consul Agent (consulServices) but have diverged state.
//
// 4) a slice of services that exist only in the Consul Agent
// (consulServices) and should be removed because the Consul Agent has
// drifted from the Syncer.
func (c *Syncer) calcServicesDiff(consulServices map[consulServiceID]*consul.AgentService) (missingServices []*consul.AgentServiceRegistration, equalServices []*consul.AgentServiceRegistration, changedServices []*consul.AgentServiceRegistration, staleServices []*consul.AgentServiceRegistration) {
type mergedService struct {
service *consul.AgentServiceRegistration
// 'l' == Nomad local only
// 'e' == equal
// 'c' == changed
// 'a' == Consul agent only
state byte
}
var (
localServicesCount = 0
equalServicesCount = 0
changedServicesCount = 0
agentServices = 0
)
c.registryLock.RLock()
localServices := make(map[string]*mergedService, len(c.trackedServices)+len(consulServices))
c.registryLock.RUnlock()
for _, localService := range c.flattenedServices() {
localServicesCount++
localServices[localService.ID] = &mergedService{localService, 'l'}
}
for _, consulService := range consulServices {
if localService, found := localServices[consulService.ID]; found {
localServicesCount--
if compareConsulService(localService.service, consulService) {
equalServicesCount++
localServices[consulService.ID].state = 'e'
} else {
changedServicesCount++
localServices[consulService.ID].state = 'c'
}
} else {
agentServices++
agentServiceReg := &consul.AgentServiceRegistration{
ID: consulService.ID,
Name: consulService.Service,
Tags: consulService.Tags,
Port: consulService.Port,
Address: consulService.Address,
}
localServices[consulService.ID] = &mergedService{agentServiceReg, 'a'}
}
}
missingServices = make([]*consul.AgentServiceRegistration, 0, localServicesCount)
equalServices = make([]*consul.AgentServiceRegistration, 0, equalServicesCount)
changedServices = make([]*consul.AgentServiceRegistration, 0, changedServicesCount)
staleServices = make([]*consul.AgentServiceRegistration, 0, agentServices)
for _, service := range localServices {
switch service.state {
case 'l':
missingServices = append(missingServices, service.service)
case 'e':
equalServices = append(equalServices, service.service)
case 'c':
changedServices = append(changedServices, service.service)
case 'a':
staleServices = append(staleServices, service.service)
}
}
return missingServices, equalServices, changedServices, staleServices
}
// syncServices synchronizes this Syncer's Consul Services with the Consul
// Agent.
func (c *Syncer) syncServices() error {
consulServices, err := c.queryAgentServices()
if err != nil {
return err
}
// Synchronize services with Consul
var mErr multierror.Error
missingServices, _, changedServices, removedServices := c.calcServicesDiff(consulServices)
for _, service := range missingServices {
if err := c.client.Agent().ServiceRegister(service); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
c.registryLock.Lock()
c.trackedServices[consulServiceID(service.ID)] = service
c.registryLock.Unlock()
}
for _, service := range changedServices {
// Re-register the local service
if err := c.client.Agent().ServiceRegister(service); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
}
for _, service := range removedServices {
if err := c.deregisterService(service.ID); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
c.registryLock.Lock()
delete(c.trackedServices, consulServiceID(service.ID))
c.registryLock.Unlock()
}
return mErr.ErrorOrNil()
}
// registerCheck registers a check definition with Consul
func (c *Syncer) registerCheck(chkReg *consul.AgentCheckRegistration) error {
c.registryLock.RLock()
if cr, ok := c.checkRunners[consulCheckID(chkReg.ID)]; ok {
cr.Start()
}
c.registryLock.RUnlock()
return c.client.Agent().CheckRegister(chkReg)
}
// createCheckReg creates a check registration that can be registered with
// Consul from a Nomad service check definition.
func (c *Syncer) createCheckReg(check *structs.ServiceCheck, serviceReg *consul.AgentServiceRegistration) (*consul.AgentCheckRegistration, error) {
chkReg := consul.AgentCheckRegistration{
ID: check.Hash(serviceReg.ID),
Name: check.Name,
ServiceID: serviceReg.ID,
}
chkReg.Timeout = check.Timeout.String()
chkReg.Interval = check.Interval.String()
host, port := serviceReg.Address, serviceReg.Port
if check.PortLabel != "" {
host, port = c.addrFinder(check.PortLabel)
}
switch check.Type {
case structs.ServiceCheckHTTP:
if check.Protocol == "" {
check.Protocol = "http"
}
base := url.URL{
Scheme: check.Protocol,
Host: net.JoinHostPort(host, strconv.Itoa(port)),
}
relative, err := url.Parse(check.Path)
if err != nil {
return nil, err
}
url := base.ResolveReference(relative)
chkReg.HTTP = url.String()
case structs.ServiceCheckTCP:
chkReg.TCP = net.JoinHostPort(host, strconv.Itoa(port))
case structs.ServiceCheckScript:
chkReg.TTL = (check.Interval + ttlCheckBuffer).String()
default:
return nil, fmt.Errorf("check type %+q not valid", check.Type)
}
chkReg.Status = check.InitialStatus
return &chkReg, nil
}
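// An illustrative sketch (not part of the original source): registering an
// HTTP check whose address is resolved through the Syncer's addrFinder. The
// helper name, addresses and port label are hypothetical.
func exampleCreateHTTPCheckReg(c *Syncer) (*consul.AgentCheckRegistration, error) {
	c.SetAddrFinder(func(portLabel string) (string, int) { return "10.0.0.1", 8080 })
	serviceReg := &consul.AgentServiceRegistration{ID: "example-service-id"}
	chk := &structs.ServiceCheck{
		Name:      "alive",
		Type:      structs.ServiceCheckHTTP,
		Path:      "/v1/status",
		PortLabel: "http",
		Interval:  10 * time.Second,
		Timeout:   2 * time.Second,
	}
	// The resulting registration's HTTP field is
	// "http://10.0.0.1:8080/v1/status".
	return c.createCheckReg(chk, serviceReg)
}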
// generateConsulServiceID takes the domain and service key and returns a Consul
// ServiceID
func generateConsulServiceID(domain ServiceDomain, key ServiceKey) consulServiceID {
return consulServiceID(fmt.Sprintf("%s-%s-%s", nomadServicePrefix, domain, key))
}
// createService creates a Consul AgentService from a Nomad ConsulService.
func (c *Syncer) createService(service *structs.Service, domain ServiceDomain, key ServiceKey) (*consul.AgentServiceRegistration, error) {
c.registryLock.RLock()
defer c.registryLock.RUnlock()
srv := consul.AgentServiceRegistration{
ID: string(generateConsulServiceID(domain, key)),
Name: service.Name,
Tags: service.Tags,
}
host, port := c.addrFinder(service.PortLabel)
if host != "" {
srv.Address = host
}
if port != 0 {
srv.Port = port
}
return &srv, nil
}
// deregisterService de-registers a service with the given ID from consul
func (c *Syncer) deregisterService(serviceID string) error {
return c.client.Agent().ServiceDeregister(serviceID)
}
// deregisterCheck de-registers a check from Consul
func (c *Syncer) deregisterCheck(id consulCheckID) error {
c.registryLock.Lock()
defer c.registryLock.Unlock()
// Deleting from Consul Agent
if err := c.client.Agent().CheckDeregister(string(id)); err != nil {
// CheckDeregister() will be reattempted again in a future
// sync.
return err
}
// Remove the check from the local registry
if cr, ok := c.checkRunners[id]; ok {
cr.Stop()
delete(c.checkRunners, id)
}
return nil
}
// Run triggers periodic syncing of services and checks with Consul. This is
// a long-lived goroutine which is stopped during shutdown.
func (c *Syncer) Run() {
sync := time.NewTimer(0)
for {
select {
case <-sync.C:
d := syncInterval - lib.RandomStagger(syncInterval/syncJitter)
sync.Reset(d)
if err := c.SyncServices(); err != nil {
if c.consulAvailable {
c.logger.Printf("[DEBUG] consul.syncer: error in syncing: %v", err)
}
c.consulAvailable = false
} else {
if !c.consulAvailable {
c.logger.Printf("[DEBUG] consul.syncer: syncs succesful")
}
c.consulAvailable = true
}
case <-c.notifySyncCh:
sync.Reset(syncInterval)
case <-c.shutdownCh:
c.Shutdown()
case <-c.notifyShutdownCh:
sync.Stop()
c.logger.Printf("[INFO] consul.syncer: shutting down syncer ")
return
}
}
}
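// Illustrative note (not part of the original source): each pass re-arms the
// timer with a jittered interval. Assuming, purely for illustration, a
// syncInterval of 5s and a syncJitter of 8, the next sync fires somewhere in
// (5s - 5s/8, 5s], i.e. roughly between 4.4s and 5s:
//
//	d := syncInterval - lib.RandomStagger(syncInterval/syncJitter)
//	sync.Reset(d)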
// RunHandlers executes each periodic handler once, in random map-iteration order.
func (c *Syncer) RunHandlers() error {
c.periodicLock.RLock()
handlers := make(map[string]types.PeriodicCallback, len(c.periodicCallbacks))
for name, fn := range c.periodicCallbacks {
handlers[name] = fn
}
c.periodicLock.RUnlock()
var mErr multierror.Error
for _, fn := range handlers {
if err := fn(); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
}
return mErr.ErrorOrNil()
}
// SyncServices syncs the services and checks with the Consul agent, then runs
// the periodic handlers.
func (c *Syncer) SyncServices() error {
var mErr multierror.Error
if err := c.syncServices(); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
if err := c.syncChecks(); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
if err := c.RunHandlers(); err != nil {
return err
}
return mErr.ErrorOrNil()
}
// filterConsulServices prunes out all the services that were not registered
// by the Syncer.
func (c *Syncer) filterConsulServices(consulServices map[string]*consul.AgentService) map[consulServiceID]*consul.AgentService {
localServices := make(map[consulServiceID]*consul.AgentService, len(consulServices))
c.registryLock.RLock()
defer c.registryLock.RUnlock()
for serviceID, service := range consulServices {
for domain := range c.servicesGroups {
if strings.HasPrefix(service.ID, fmt.Sprintf("%s-%s", nomadServicePrefix, domain)) {
localServices[consulServiceID(serviceID)] = service
break
}
}
}
return localServices
}
// filterConsulChecks prunes out all the Consul checks that do not belong to
// services registered by the Syncer.
func (c *Syncer) filterConsulChecks(consulChecks map[string]*consul.AgentCheck) map[consulCheckID]*consul.AgentCheck {
localChecks := make(map[consulCheckID]*consul.AgentCheck, len(consulChecks))
c.registryLock.RLock()
defer c.registryLock.RUnlock()
for checkID, check := range consulChecks {
for domain := range c.checkGroups {
if strings.HasPrefix(check.ServiceID, fmt.Sprintf("%s-%s", nomadServicePrefix, domain)) {
localChecks[consulCheckID(checkID)] = check
break
}
}
}
return localChecks
}
// consulPresent indicates whether the Consul Agent is responding
func (c *Syncer) consulPresent() bool {
_, err := c.client.Agent().Self()
return err == nil
}
// runCheck runs a check and updates the corresponding TTL check in Consul
func (c *Syncer) runCheck(check Check) {
res := check.Run()
if res.Duration >= check.Timeout() {
c.logger.Printf("[DEBUG] consul.syncer: check took time: %v, timeout: %v", res.Duration, check.Timeout())
}
state := consul.HealthCritical
output := res.Output
switch res.ExitCode {
case 0:
state = consul.HealthPassing
case 1:
state = consul.HealthWarning
default:
state = consul.HealthCritical
}
if res.Err != nil {
state = consul.HealthCritical
output = res.Err.Error()
}
if err := c.client.Agent().UpdateTTL(check.ID(), output, state); err != nil {
if c.consulAvailable {
c.logger.Printf("[DEBUG] consul.syncer: check %+q failed, disabling Consul checks until until next successful sync: %v", check.ID(), err)
c.consulAvailable = false
} else {
c.consulAvailable = true
}
}
}
// ReapUnmatched prunes all services that do not exist in the passed domains
func (c *Syncer) ReapUnmatched(domains []ServiceDomain) error {
servicesInConsul, err := c.ConsulClient().Agent().Services()
if err != nil {
return err
}
var mErr multierror.Error
for serviceID := range servicesInConsul {
// Skip any service that was not registered by Nomad
if !strings.HasPrefix(serviceID, nomadServicePrefix) {
continue
}
// Filter services that do not exist in the desired domains
match := false
for _, domain := range domains {
// Include the trailing hyphen so the match is scoped to that exact domain;
// otherwise it may be a prefix (subset) match.
desired := fmt.Sprintf("%s-%s-", nomadServicePrefix, domain)
if strings.HasPrefix(serviceID, desired) {
match = true
break
}
}
if !match {
if err := c.deregisterService(serviceID); err != nil {
mErr.Errors = append(mErr.Errors, err)
}
}
}
return mErr.ErrorOrNil()
}
// AddPeriodicHandler adds a uniquely named callback. Returns true if
// successful, false if a handler with the same name already exists.
func (c *Syncer) AddPeriodicHandler(name string, fn types.PeriodicCallback) bool {
c.periodicLock.Lock()
defer c.periodicLock.Unlock()
if _, found := c.periodicCallbacks[name]; found {
c.logger.Printf("[ERROR] consul.syncer: failed adding handler %+q", name)
return false
}
c.periodicCallbacks[name] = fn
return true
}
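// Example (illustrative sketch, not part of the original source): callers can
// register a named callback that is invoked on every sync pass via
// RunHandlers; the callback takes no arguments and returns an error, as used
// by RunHandlers above. The handler name and body below are assumptions.
//
//	ok := c.AddPeriodicHandler("example-handler", func() error {
//		// per-sync bookkeeping goes here
//		return nil
//	})
//	if !ok {
//		// a handler with the same name was already registered
//	}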
// NumHandlers returns the number of callbacks registered with the syncer
func (c *Syncer) NumHandlers() int {
c.periodicLock.RLock()
defer c.periodicLock.RUnlock()
return len(c.periodicCallbacks)
}
// RemovePeriodicHandler removes a handler with a given name.
func (c *Syncer) RemovePeriodicHandler(name string) {
c.periodicLock.Lock()
defer c.periodicLock.Unlock()
delete(c.periodicCallbacks, name)
}
// ConsulClient returns the Consul client used by the Syncer.
func (c *Syncer) ConsulClient() *consul.Client {
return c.client
}

View File

@ -1,89 +0,0 @@
package agent
import (
"net/http"
"strings"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) EvalsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.EvalListRequest{}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.EvalListResponse
if err := s.agent.RPC("Eval.List", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Evaluations == nil {
out.Evaluations = make([]*structs.Evaluation, 0)
}
return out.Evaluations, nil
}
func (s *HTTPServer) EvalSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
path := strings.TrimPrefix(req.URL.Path, "/v1/evaluation/")
switch {
case strings.HasSuffix(path, "/allocations"):
evalID := strings.TrimSuffix(path, "/allocations")
return s.evalAllocations(resp, req, evalID)
default:
return s.evalQuery(resp, req, path)
}
}
func (s *HTTPServer) evalAllocations(resp http.ResponseWriter, req *http.Request, evalID string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.EvalSpecificRequest{
EvalID: evalID,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.EvalAllocationsResponse
if err := s.agent.RPC("Eval.Allocations", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Allocations == nil {
out.Allocations = make([]*structs.AllocListStub, 0)
}
return out.Allocations, nil
}
func (s *HTTPServer) evalQuery(resp http.ResponseWriter, req *http.Request, evalID string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.EvalSpecificRequest{
EvalID: evalID,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SingleEvalResponse
if err := s.agent.RPC("Eval.GetEval", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Eval == nil {
return nil, CodedError(404, "eval not found")
}
return out.Eval, nil
}

File diff suppressed because it is too large

View File

@ -1,316 +0,0 @@
package agent
import (
"bytes"
"encoding/json"
"fmt"
"io"
"log"
"net"
"net/http"
"net/http/pprof"
"strconv"
"time"
"github.com/NYTimes/gziphandler"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/ugorji/go/codec"
)
const (
// ErrInvalidMethod is used if the HTTP method is not supported
ErrInvalidMethod = "Invalid method"
// scadaHTTPAddr is the address associated with the
// HTTPServer. When populating an ACL token for a request,
// this is checked to switch between the ACLToken and
// AtlasACLToken
scadaHTTPAddr = "SCADA"
)
var (
// jsonHandle and jsonHandlePretty are the codec handles to JSON encode
// structs. The pretty handle will add indents for easier human consumption.
jsonHandle = &codec.JsonHandle{}
jsonHandlePretty = &codec.JsonHandle{Indent: 4}
)
// HTTPServer is used to wrap an Agent and expose it over an HTTP interface
type HTTPServer struct {
agent *Agent
mux *http.ServeMux
listener net.Listener
logger *log.Logger
addr string
}
// NewHTTPServer starts a new HTTP server over the agent
func NewHTTPServer(agent *Agent, config *Config, logOutput io.Writer) (*HTTPServer, error) {
// Start the listener
ln, err := config.Listener("tcp", config.Addresses.HTTP, config.Ports.HTTP)
if err != nil {
return nil, fmt.Errorf("failed to start HTTP listener: %v", err)
}
// Create the mux
mux := http.NewServeMux()
// Create the server
srv := &HTTPServer{
agent: agent,
mux: mux,
listener: ln,
logger: agent.logger,
addr: ln.Addr().String(),
}
srv.registerHandlers(config.EnableDebug)
// Start the server
go http.Serve(ln, gziphandler.GzipHandler(mux))
return srv, nil
}
// newScadaHttp creates a new HTTP server wrapping the SCADA
// listener such that HTTP calls can be sent from the brokers.
func newScadaHttp(agent *Agent, list net.Listener) *HTTPServer {
// Create the mux
mux := http.NewServeMux()
// Create the server
srv := &HTTPServer{
agent: agent,
mux: mux,
listener: list,
logger: agent.logger,
addr: scadaHTTPAddr,
}
srv.registerHandlers(false) // Never allow debug for SCADA
// Start the server
go http.Serve(list, gziphandler.GzipHandler(mux))
return srv
}
// Shutdown is used to shutdown the HTTP server
func (s *HTTPServer) Shutdown() {
if s != nil {
s.logger.Printf("[DEBUG] http: Shutting down http server")
s.listener.Close()
}
}
// registerHandlers is used to attach our handlers to the mux
func (s *HTTPServer) registerHandlers(enableDebug bool) {
s.mux.HandleFunc("/v1/jobs", s.wrap(s.JobsRequest))
s.mux.HandleFunc("/v1/job/", s.wrap(s.JobSpecificRequest))
s.mux.HandleFunc("/v1/nodes", s.wrap(s.NodesRequest))
s.mux.HandleFunc("/v1/node/", s.wrap(s.NodeSpecificRequest))
s.mux.HandleFunc("/v1/allocations", s.wrap(s.AllocsRequest))
s.mux.HandleFunc("/v1/allocation/", s.wrap(s.AllocSpecificRequest))
s.mux.HandleFunc("/v1/evaluations", s.wrap(s.EvalsRequest))
s.mux.HandleFunc("/v1/evaluation/", s.wrap(s.EvalSpecificRequest))
s.mux.HandleFunc("/v1/client/fs/", s.wrap(s.FsRequest))
s.mux.HandleFunc("/v1/client/stats", s.wrap(s.ClientStatsRequest))
s.mux.HandleFunc("/v1/client/allocation/", s.wrap(s.ClientAllocRequest))
s.mux.HandleFunc("/v1/agent/self", s.wrap(s.AgentSelfRequest))
s.mux.HandleFunc("/v1/agent/join", s.wrap(s.AgentJoinRequest))
s.mux.HandleFunc("/v1/agent/members", s.wrap(s.AgentMembersRequest))
s.mux.HandleFunc("/v1/agent/force-leave", s.wrap(s.AgentForceLeaveRequest))
s.mux.HandleFunc("/v1/agent/servers", s.wrap(s.AgentServersRequest))
s.mux.HandleFunc("/v1/regions", s.wrap(s.RegionListRequest))
s.mux.HandleFunc("/v1/status/leader", s.wrap(s.StatusLeaderRequest))
s.mux.HandleFunc("/v1/status/peers", s.wrap(s.StatusPeersRequest))
s.mux.HandleFunc("/v1/system/gc", s.wrap(s.GarbageCollectRequest))
s.mux.HandleFunc("/v1/system/reconcile/summaries", s.wrap(s.ReconcileJobSummaries))
if enableDebug {
s.mux.HandleFunc("/debug/pprof/", pprof.Index)
s.mux.HandleFunc("/debug/pprof/cmdline", pprof.Cmdline)
s.mux.HandleFunc("/debug/pprof/profile", pprof.Profile)
s.mux.HandleFunc("/debug/pprof/symbol", pprof.Symbol)
}
}
// HTTPCodedError is used to provide the HTTP error code
type HTTPCodedError interface {
error
Code() int
}
func CodedError(c int, s string) HTTPCodedError {
return &codedError{s, c}
}
type codedError struct {
s string
code int
}
func (e *codedError) Error() string {
return e.s
}
func (e *codedError) Code() int {
return e.code
}
// wrap adapts an endpoint handler: it logs the request, maps returned errors
// to HTTP status codes, and JSON-encodes any non-nil return value.
func (s *HTTPServer) wrap(handler func(resp http.ResponseWriter, req *http.Request) (interface{}, error)) func(resp http.ResponseWriter, req *http.Request) {
f := func(resp http.ResponseWriter, req *http.Request) {
setHeaders(resp, s.agent.config.HTTPAPIResponseHeaders)
// Invoke the handler
reqURL := req.URL.String()
start := time.Now()
defer func() {
s.logger.Printf("[DEBUG] http: Request %v (%v)", reqURL, time.Now().Sub(start))
}()
obj, err := handler(resp, req)
// Check for an error
HAS_ERR:
if err != nil {
s.logger.Printf("[ERR] http: Request %v, error: %v", reqURL, err)
code := 500
if http, ok := err.(HTTPCodedError); ok {
code = http.Code()
}
resp.WriteHeader(code)
resp.Write([]byte(err.Error()))
return
}
prettyPrint := false
if v, ok := req.URL.Query()["pretty"]; ok {
if len(v) > 0 && (len(v[0]) == 0 || v[0] != "0") {
prettyPrint = true
}
}
// Write out the JSON object
if obj != nil {
var buf bytes.Buffer
if prettyPrint {
enc := codec.NewEncoder(&buf, jsonHandlePretty)
err = enc.Encode(obj)
if err == nil {
buf.Write([]byte("\n"))
}
} else {
enc := codec.NewEncoder(&buf, jsonHandle)
err = enc.Encode(obj)
}
if err != nil {
goto HAS_ERR
}
resp.Header().Set("Content-Type", "application/json")
resp.Write(buf.Bytes())
}
}
return f
}
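// Example (illustrative sketch, not part of the original source): any handler
// with this signature can be passed to wrap; returning a CodedError maps to
// that HTTP status code, and a non-nil return value is JSON-encoded into the
// response. The endpoint path and handler name below are assumptions.
//
//	func (s *HTTPServer) pingRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
//		if req.Method != "GET" {
//			return nil, CodedError(405, ErrInvalidMethod)
//		}
//		return map[string]string{"status": "ok"}, nil
//	}
//
//	// registered in registerHandlers via:
//	//   s.mux.HandleFunc("/v1/ping", s.wrap(s.pingRequest))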
// decodeBody is used to decode a JSON request body
func decodeBody(req *http.Request, out interface{}) error {
dec := json.NewDecoder(req.Body)
return dec.Decode(&out)
}
// setIndex is used to set the index response header
func setIndex(resp http.ResponseWriter, index uint64) {
resp.Header().Set("X-Nomad-Index", strconv.FormatUint(index, 10))
}
// setKnownLeader is used to set the known leader header
func setKnownLeader(resp http.ResponseWriter, known bool) {
s := "true"
if !known {
s = "false"
}
resp.Header().Set("X-Nomad-KnownLeader", s)
}
// setLastContact is used to set the last contact header
func setLastContact(resp http.ResponseWriter, last time.Duration) {
lastMsec := uint64(last / time.Millisecond)
resp.Header().Set("X-Nomad-LastContact", strconv.FormatUint(lastMsec, 10))
}
// setMeta is used to set the query response meta data
func setMeta(resp http.ResponseWriter, m *structs.QueryMeta) {
setIndex(resp, m.Index)
setLastContact(resp, m.LastContact)
setKnownLeader(resp, m.KnownLeader)
}
// setHeaders is used to set canonical response header fields
func setHeaders(resp http.ResponseWriter, headers map[string]string) {
for field, value := range headers {
resp.Header().Set(http.CanonicalHeaderKey(field), value)
}
}
// parseWait is used to parse the ?wait and ?index query params
// Returns true on error
func parseWait(resp http.ResponseWriter, req *http.Request, b *structs.QueryOptions) bool {
query := req.URL.Query()
if wait := query.Get("wait"); wait != "" {
dur, err := time.ParseDuration(wait)
if err != nil {
resp.WriteHeader(400)
resp.Write([]byte("Invalid wait time"))
return true
}
b.MaxQueryTime = dur
}
if idx := query.Get("index"); idx != "" {
index, err := strconv.ParseUint(idx, 10, 64)
if err != nil {
resp.WriteHeader(400)
resp.Write([]byte("Invalid index"))
return true
}
b.MinQueryIndex = index
}
return false
}
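// Illustrative note (not part of the original source): a blocking query such
// as
//
//	GET /v1/jobs?index=125&wait=60s
//
// sets b.MinQueryIndex to 125 and b.MaxQueryTime to one minute, while a
// malformed value (for example wait=abc) writes a 400 response and returns
// true so the caller can abort.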
// parseConsistency is used to parse the ?stale query params.
func parseConsistency(req *http.Request, b *structs.QueryOptions) {
query := req.URL.Query()
if _, ok := query["stale"]; ok {
b.AllowStale = true
}
}
// parsePrefix is used to parse the ?prefix query param
func parsePrefix(req *http.Request, b *structs.QueryOptions) {
query := req.URL.Query()
if prefix := query.Get("prefix"); prefix != "" {
b.Prefix = prefix
}
}
// parseRegion is used to parse the ?region query param
func (s *HTTPServer) parseRegion(req *http.Request, r *string) {
if other := req.URL.Query().Get("region"); other != "" {
*r = other
} else if *r == "" {
*r = s.agent.config.Region
}
}
// parse is a convenience method for endpoints that need to parse multiple flags
func (s *HTTPServer) parse(resp http.ResponseWriter, req *http.Request, r *string, b *structs.QueryOptions) bool {
s.parseRegion(req, r)
parseConsistency(req, b)
parsePrefix(req, b)
return parseWait(resp, req, b)
}

View File

@ -1,267 +0,0 @@
package agent
import (
"net/http"
"strings"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) JobsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
switch req.Method {
case "GET":
return s.jobListRequest(resp, req)
case "PUT", "POST":
return s.jobUpdate(resp, req, "")
default:
return nil, CodedError(405, ErrInvalidMethod)
}
}
func (s *HTTPServer) jobListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
args := structs.JobListRequest{}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.JobListResponse
if err := s.agent.RPC("Job.List", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Jobs == nil {
out.Jobs = make([]*structs.JobListStub, 0)
}
return out.Jobs, nil
}
func (s *HTTPServer) JobSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
path := strings.TrimPrefix(req.URL.Path, "/v1/job/")
switch {
case strings.HasSuffix(path, "/evaluate"):
jobName := strings.TrimSuffix(path, "/evaluate")
return s.jobForceEvaluate(resp, req, jobName)
case strings.HasSuffix(path, "/allocations"):
jobName := strings.TrimSuffix(path, "/allocations")
return s.jobAllocations(resp, req, jobName)
case strings.HasSuffix(path, "/evaluations"):
jobName := strings.TrimSuffix(path, "/evaluations")
return s.jobEvaluations(resp, req, jobName)
case strings.HasSuffix(path, "/periodic/force"):
jobName := strings.TrimSuffix(path, "/periodic/force")
return s.periodicForceRequest(resp, req, jobName)
case strings.HasSuffix(path, "/plan"):
jobName := strings.TrimSuffix(path, "/plan")
return s.jobPlan(resp, req, jobName)
case strings.HasSuffix(path, "/summary"):
jobName := strings.TrimSuffix(path, "/summary")
return s.jobSummaryRequest(resp, req, jobName)
default:
return s.jobCRUD(resp, req, path)
}
}
func (s *HTTPServer) jobForceEvaluate(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.JobEvaluateRequest{
JobID: jobName,
}
s.parseRegion(req, &args.Region)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Evaluate", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) jobPlan(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.JobPlanRequest
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
}
if args.Job == nil {
return nil, CodedError(400, "Job must be specified")
}
if jobName != "" && args.Job.ID != jobName {
return nil, CodedError(400, "Job ID does not match")
}
s.parseRegion(req, &args.Region)
var out structs.JobPlanResponse
if err := s.agent.RPC("Job.Plan", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) periodicForceRequest(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.PeriodicForceRequest{
JobID: jobName,
}
s.parseRegion(req, &args.Region)
var out structs.PeriodicForceResponse
if err := s.agent.RPC("Periodic.Force", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) jobAllocations(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.JobSpecificRequest{
JobID: jobName,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.JobAllocationsResponse
if err := s.agent.RPC("Job.Allocations", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Allocations == nil {
out.Allocations = make([]*structs.AllocListStub, 0)
}
return out.Allocations, nil
}
func (s *HTTPServer) jobEvaluations(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.JobSpecificRequest{
JobID: jobName,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.JobEvaluationsResponse
if err := s.agent.RPC("Job.Evaluations", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Evaluations == nil {
out.Evaluations = make([]*structs.Evaluation, 0)
}
return out.Evaluations, nil
}
func (s *HTTPServer) jobCRUD(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
switch req.Method {
case "GET":
return s.jobQuery(resp, req, jobName)
case "PUT", "POST":
return s.jobUpdate(resp, req, jobName)
case "DELETE":
return s.jobDelete(resp, req, jobName)
default:
return nil, CodedError(405, ErrInvalidMethod)
}
}
func (s *HTTPServer) jobQuery(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
args := structs.JobSpecificRequest{
JobID: jobName,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SingleJobResponse
if err := s.agent.RPC("Job.GetJob", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Job == nil {
return nil, CodedError(404, "job not found")
}
return out.Job, nil
}
func (s *HTTPServer) jobUpdate(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
var args structs.JobRegisterRequest
if err := decodeBody(req, &args); err != nil {
return nil, CodedError(400, err.Error())
}
if args.Job == nil {
return nil, CodedError(400, "Job must be specified")
}
if jobName != "" && args.Job.ID != jobName {
return nil, CodedError(400, "Job ID does not match")
}
s.parseRegion(req, &args.Region)
var out structs.JobRegisterResponse
if err := s.agent.RPC("Job.Register", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) jobDelete(resp http.ResponseWriter, req *http.Request,
jobName string) (interface{}, error) {
args := structs.JobDeregisterRequest{
JobID: jobName,
}
s.parseRegion(req, &args.Region)
var out structs.JobDeregisterResponse
if err := s.agent.RPC("Job.Deregister", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) jobSummaryRequest(resp http.ResponseWriter, req *http.Request, name string) (interface{}, error) {
args := structs.JobSummaryRequest{
JobID: name,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.JobSummaryResponse
if err := s.agent.RPC("Job.Summary", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.JobSummary == nil {
return nil, CodedError(404, "job not found")
}
setIndex(resp, out.Index)
return out.JobSummary, nil
}

View File

@ -1,28 +0,0 @@
package agent
import (
"io/ioutil"
"github.com/hashicorp/logutils"
)
// LevelFilter returns a LevelFilter that is configured with the log
// levels that we use.
func LevelFilter() *logutils.LevelFilter {
return &logutils.LevelFilter{
Levels: []logutils.LogLevel{"TRACE", "DEBUG", "INFO", "WARN", "ERR"},
MinLevel: "INFO",
Writer: ioutil.Discard,
}
}
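// Example (illustrative sketch, not part of the original source): the returned
// filter is typically given a real writer and the configured minimum level
// before being handed to a logger. The level, writer, and "os"/"log" imports
// below are assumptions for illustration.
//
//	filter := LevelFilter()
//	filter.MinLevel = logutils.LogLevel("DEBUG")
//	filter.Writer = os.Stderr
//	logger := log.New(filter, "", log.LstdFlags)
//	logger.Println("[DEBUG] agent: example message")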
// ValidateLevelFilter verifies that the log levels within the filter
// are valid.
func ValidateLevelFilter(minLevel logutils.LogLevel, filter *logutils.LevelFilter) bool {
for _, level := range filter.Levels {
if level == minLevel {
return true
}
}
return false
}

View File

@ -1,83 +0,0 @@
package agent
import (
"sync"
)
// LogHandler interface is used for clients that want to subscribe
// to logs, for example to stream them over an IPC mechanism
type LogHandler interface {
HandleLog(string)
}
// logWriter implements io.Writer so it can be used as a log sink.
// It maintains a circular buffer of logs and a set of handlers to
// which it streams the logs.
type logWriter struct {
sync.Mutex
logs []string
index int
handlers map[LogHandler]struct{}
}
// NewLogWriter creates a logWriter with the given buffer capacity
func NewLogWriter(buf int) *logWriter {
return &logWriter{
logs: make([]string, buf),
index: 0,
handlers: make(map[LogHandler]struct{}),
}
}
// RegisterHandler adds a log handler to receive logs, and sends
// the last buffered logs to the handler
func (l *logWriter) RegisterHandler(lh LogHandler) {
l.Lock()
defer l.Unlock()
// Do nothing if already registered
if _, ok := l.handlers[lh]; ok {
return
}
// Register
l.handlers[lh] = struct{}{}
// Send the old logs
if l.logs[l.index] != "" {
for i := l.index; i < len(l.logs); i++ {
lh.HandleLog(l.logs[i])
}
}
for i := 0; i < l.index; i++ {
lh.HandleLog(l.logs[i])
}
}
// DeregisterHandler removes a LogHandler and prevents more invocations
func (l *logWriter) DeregisterHandler(lh LogHandler) {
l.Lock()
defer l.Unlock()
delete(l.handlers, lh)
}
// Write is used to accumulate new logs
func (l *logWriter) Write(p []byte) (n int, err error) {
l.Lock()
defer l.Unlock()
// Strip off newlines at the end if there are any since we store
// individual log lines in the agent.
n = len(p)
if p[n-1] == '\n' {
p = p[:n-1]
}
l.logs[l.index] = string(p)
l.index = (l.index + 1) % len(l.logs)
for lh := range l.handlers {
lh.HandleLog(string(p))
}
return
}
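// Example (illustrative sketch, not part of the original source): a minimal
// LogHandler that prints every line it receives. The handler type, buffer
// size, and "fmt" import are assumptions for illustration.
//
//	type printHandler struct{}
//
//	func (printHandler) HandleLog(line string) { fmt.Println(line) }
//
//	lw := NewLogWriter(512)            // keep the most recent 512 lines
//	lw.RegisterHandler(printHandler{}) // replays buffered lines, then streams
//	fmt.Fprintln(lw, "[INFO] agent: example line")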

View File

@ -1,144 +0,0 @@
package agent
import (
"net/http"
"strconv"
"strings"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) NodesRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.NodeListRequest{}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.NodeListResponse
if err := s.agent.RPC("Node.List", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Nodes == nil {
out.Nodes = make([]*structs.NodeListStub, 0)
}
return out.Nodes, nil
}
func (s *HTTPServer) NodeSpecificRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
path := strings.TrimPrefix(req.URL.Path, "/v1/node/")
switch {
case strings.HasSuffix(path, "/evaluate"):
nodeName := strings.TrimSuffix(path, "/evaluate")
return s.nodeForceEvaluate(resp, req, nodeName)
case strings.HasSuffix(path, "/allocations"):
nodeName := strings.TrimSuffix(path, "/allocations")
return s.nodeAllocations(resp, req, nodeName)
case strings.HasSuffix(path, "/drain"):
nodeName := strings.TrimSuffix(path, "/drain")
return s.nodeToggleDrain(resp, req, nodeName)
default:
return s.nodeQuery(resp, req, path)
}
}
func (s *HTTPServer) nodeForceEvaluate(resp http.ResponseWriter, req *http.Request,
nodeID string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.NodeEvaluateRequest{
NodeID: nodeID,
}
s.parseRegion(req, &args.Region)
var out structs.NodeUpdateResponse
if err := s.agent.RPC("Node.Evaluate", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) nodeAllocations(resp http.ResponseWriter, req *http.Request,
nodeID string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.NodeSpecificRequest{
NodeID: nodeID,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.NodeAllocsResponse
if err := s.agent.RPC("Node.GetAllocs", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Allocs == nil {
out.Allocs = make([]*structs.Allocation, 0)
}
return out.Allocs, nil
}
func (s *HTTPServer) nodeToggleDrain(resp http.ResponseWriter, req *http.Request,
nodeID string) (interface{}, error) {
if req.Method != "PUT" && req.Method != "POST" {
return nil, CodedError(405, ErrInvalidMethod)
}
// Get the enable value
enableRaw := req.URL.Query().Get("enable")
if enableRaw == "" {
return nil, CodedError(400, "missing enable value")
}
enable, err := strconv.ParseBool(enableRaw)
if err != nil {
return nil, CodedError(400, "invalid enable value")
}
args := structs.NodeUpdateDrainRequest{
NodeID: nodeID,
Drain: enable,
}
s.parseRegion(req, &args.Region)
var out structs.NodeDrainUpdateResponse
if err := s.agent.RPC("Node.UpdateDrain", &args, &out); err != nil {
return nil, err
}
setIndex(resp, out.Index)
return out, nil
}
func (s *HTTPServer) nodeQuery(resp http.ResponseWriter, req *http.Request,
nodeID string) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
args := structs.NodeSpecificRequest{
NodeID: nodeID,
}
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var out structs.SingleNodeResponse
if err := s.agent.RPC("Node.GetNode", &args, &out); err != nil {
return nil, err
}
setMeta(resp, &out.QueryMeta)
if out.Node == nil {
return nil, CodedError(404, "node not found")
}
return out.Node, nil
}

View File

@ -1,24 +0,0 @@
package agent
import (
"net/http"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) RegionListRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.GenericRequest
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var regions []string
if err := s.agent.RPC("Region.List", &args, &regions); err != nil {
return nil, err
}
return regions, nil
}

View File

@ -1,12 +0,0 @@
package agent
import "net/http"
func (s *HTTPServer) ClientStatsRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if s.agent.client == nil {
return nil, clientNotRunning
}
clientStats := s.agent.client.StatsReporter()
return clientStats.LatestHostStats(), nil
}

View File

@ -1,44 +0,0 @@
package agent
import (
"net/http"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) StatusLeaderRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.GenericRequest
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var leader string
if err := s.agent.RPC("Status.Leader", &args, &leader); err != nil {
return nil, err
}
return leader, nil
}
func (s *HTTPServer) StatusPeersRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "GET" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.GenericRequest
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var peers []string
if err := s.agent.RPC("Status.Peers", &args, &peers); err != nil {
return nil, err
}
if len(peers) == 0 {
peers = make([]string, 0)
}
return peers, nil
}

View File

@ -1,56 +0,0 @@
package agent
import (
"bytes"
"github.com/hashicorp/go-syslog"
"github.com/hashicorp/logutils"
)
// levelPriority is used to map a log level to a
// syslog priority level
var levelPriority = map[string]gsyslog.Priority{
"TRACE": gsyslog.LOG_DEBUG,
"DEBUG": gsyslog.LOG_INFO,
"INFO": gsyslog.LOG_NOTICE,
"WARN": gsyslog.LOG_WARNING,
"ERR": gsyslog.LOG_ERR,
"CRIT": gsyslog.LOG_CRIT,
}
// SyslogWrapper is used to clean up log messages before
// writing them to a Syslogger. Implements the io.Writer
// interface.
type SyslogWrapper struct {
l gsyslog.Syslogger
filt *logutils.LevelFilter
}
// Write is used to implement io.Writer
func (s *SyslogWrapper) Write(p []byte) (int, error) {
// Skip syslog if the log level doesn't apply
if !s.filt.Check(p) {
return 0, nil
}
// Extract log level
var level string
afterLevel := p
x := bytes.IndexByte(p, '[')
if x >= 0 {
y := bytes.IndexByte(p[x:], ']')
if y >= 0 {
level = string(p[x+1 : x+y])
afterLevel = p[x+y+2:]
}
}
// Each log level will be handled by a specific syslog priority
priority, ok := levelPriority[level]
if !ok {
priority = gsyslog.LOG_NOTICE
}
// Attempt the write
err := s.l.WriteLevel(priority, afterLevel)
return len(p), err
}
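// Illustrative note (not part of the original source): for a line such as
//
//	"2016/11/18 10:00:00 [WARN] client: example message\n"
//
// the extracted level is "WARN", which maps to gsyslog.LOG_WARNING, and
// afterLevel begins at "client: example message". Levels not present in
// levelPriority fall back to gsyslog.LOG_NOTICE.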

View File

@ -1,41 +0,0 @@
package agent
import (
"net/http"
"github.com/hashicorp/nomad/nomad/structs"
)
func (s *HTTPServer) GarbageCollectRequest(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "PUT" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.GenericRequest
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var gResp structs.GenericResponse
if err := s.agent.RPC("System.GarbageCollect", &args, &gResp); err != nil {
return nil, err
}
return nil, nil
}
func (s *HTTPServer) ReconcileJobSummaries(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
if req.Method != "PUT" {
return nil, CodedError(405, ErrInvalidMethod)
}
var args structs.GenericRequest
if s.parse(resp, req, &args.Region, &args.QueryOptions) {
return nil, nil
}
var gResp structs.GenericResponse
if err := s.agent.RPC("System.ReconcileJobSummaries", &args, &gResp); err != nil {
return nil, err
}
return nil, nil
}

View File

@ -1 +0,0 @@
bind_addr = "0.0.0.0"

View File

@ -1,5 +0,0 @@
{
"advertise": {
"rpc": "127.0.0.1:4647"
}
}

View File

@ -1 +0,0 @@
data_dir = "/var/lib/nomad"

View File

@ -1,83 +0,0 @@
package command
import (
"fmt"
"sort"
"strings"
)
type AgentInfoCommand struct {
Meta
}
func (c *AgentInfoCommand) Help() string {
helpText := `
Usage: nomad agent-info [options]
Display status information about the local agent.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *AgentInfoCommand) Synopsis() string {
return "Display status information about the local agent"
}
func (c *AgentInfoCommand) Run(args []string) int {
flags := c.Meta.FlagSet("agent-info", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got no arguments
args = flags.Args()
if len(args) > 0 {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Query the agent info
info, err := client.Agent().Self()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying agent info: %s", err))
return 1
}
// Sort and output agent info
var stats map[string]interface{}
stats, _ = info["stats"]
statsKeys := make([]string, 0, len(stats))
for key := range stats {
statsKeys = append(statsKeys, key)
}
sort.Strings(statsKeys)
for _, key := range statsKeys {
c.Ui.Output(key)
statsData, _ := stats[key].(map[string]interface{})
statsDataKeys := make([]string, len(statsData))
i := 0
for key := range statsData {
statsDataKeys[i] = key
i++
}
sort.Strings(statsDataKeys)
for _, key := range statsDataKeys {
c.Ui.Output(fmt.Sprintf(" %s = %v", key, statsData[key]))
}
}
return 0
}

View File

@ -1,506 +0,0 @@
package command
import (
"fmt"
"math"
"sort"
"strconv"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/mitchellh/colorstring"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/client"
)
type AllocStatusCommand struct {
Meta
color *colorstring.Colorize
}
func (c *AllocStatusCommand) Help() string {
helpText := `
Usage: nomad alloc-status [options] <allocation>
Display information about an existing allocation and its tasks. This command
can be used to inspect the current status of an allocation, including its
running status, metadata, and verbose failure messages reported by internal
subsystems.
General Options:
` + generalOptionsUsage() + `
Alloc Status Options:
-short
Display short output. Shows only the most recent task event.
-stats
Display detailed resource usage statistics.
-verbose
Show full information.
-json
Output the allocation in its JSON format.
-t
Format and display allocation using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *AllocStatusCommand) Synopsis() string {
return "Display allocation status information and metadata"
}
func (c *AllocStatusCommand) Run(args []string) int {
var short, displayStats, verbose, json bool
var tmpl string
flags := c.Meta.FlagSet("alloc-status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&displayStats, "stats", false, "")
flags.BoolVar(&json, "json", false, "")
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one allocation ID
args = flags.Args()
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// If no allocation ID was given but an output format was specified, format and output the full allocations list
if len(args) == 0 {
var format string
if json && len(tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if json {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
allocs, _, err := client.Allocations().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying allocations: %v", err))
return 1
}
// Return nothing if no allocations found
if len(allocs) == 0 {
return 0
}
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(allocs)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
}
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
allocID := args[0]
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Query the allocation info
if len(allocID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(allocID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
allocID = allocID[:len(allocID)-1]
}
allocs, _, err := client.Allocations().PrefixList(allocID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
return 1
}
if len(allocs) == 0 {
c.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
return 1
}
if len(allocs) > 1 {
// Format the allocs
out := make([]string, len(allocs)+1)
out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
for i, alloc := range allocs {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
limit(alloc.ID, length),
limit(alloc.EvalID, length),
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus,
)
}
c.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single allocation
alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
return 1
}
// If output format is specified, format and output the data
var format string
if json && len(tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if json {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(alloc)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
var statsErr error
var stats *api.AllocResourceUsage
stats, statsErr = client.Allocations().Stats(alloc, nil)
if statsErr != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("couldn't retrieve stats (HINT: ensure Client.Advertise.HTTP is set): %v", statsErr))
}
// Format the allocation data
basic := []string{
fmt.Sprintf("ID|%s", limit(alloc.ID, length)),
fmt.Sprintf("Eval ID|%s", limit(alloc.EvalID, length)),
fmt.Sprintf("Name|%s", alloc.Name),
fmt.Sprintf("Node ID|%s", limit(alloc.NodeID, length)),
fmt.Sprintf("Job ID|%s", alloc.JobID),
fmt.Sprintf("Client Status|%s", alloc.ClientStatus),
}
if verbose {
basic = append(basic,
fmt.Sprintf("Evaluated Nodes|%d", alloc.Metrics.NodesEvaluated),
fmt.Sprintf("Filtered Nodes|%d", alloc.Metrics.NodesFiltered),
fmt.Sprintf("Exhausted Nodes|%d", alloc.Metrics.NodesExhausted),
fmt.Sprintf("Allocation Time|%s", alloc.Metrics.AllocationTime),
fmt.Sprintf("Failures|%d", alloc.Metrics.CoalescedFailures))
}
c.Ui.Output(formatKV(basic))
if short {
c.shortTaskStatus(alloc)
} else {
c.outputTaskDetails(alloc, stats, displayStats)
}
// Format the detailed status
if verbose {
c.Ui.Output(c.Colorize().Color("\n[bold]Placement Metrics[reset]"))
c.Ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
}
return 0
}
// outputTaskDetails prints task details for each task in the allocation,
// optionally printing verbose statistics if displayStats is set
func (c *AllocStatusCommand) outputTaskDetails(alloc *api.Allocation, stats *api.AllocResourceUsage, displayStats bool) {
for task := range c.sortedTaskStateIterator(alloc.TaskStates) {
state := alloc.TaskStates[task]
c.Ui.Output(c.Colorize().Color(fmt.Sprintf("\n[bold]Task %q is %q[reset]", task, state.State)))
c.outputTaskResources(alloc, task, stats, displayStats)
c.Ui.Output("")
c.outputTaskStatus(state)
}
}
// outputTaskStatus prints out a list of the most recent events for the given
// task state.
func (c *AllocStatusCommand) outputTaskStatus(state *api.TaskState) {
c.Ui.Output("Recent Events:")
events := make([]string, len(state.Events)+1)
events[0] = "Time|Type|Description"
size := len(state.Events)
for i, event := range state.Events {
formattedTime := formatUnixNanoTime(event.Time)
// Build up the description based on the event type.
var desc string
switch event.Type {
case api.TaskStarted:
desc = "Task started by client"
case api.TaskReceived:
desc = "Task received by client"
case api.TaskFailedValidation:
if event.ValidationError != "" {
desc = event.ValidationError
} else {
desc = "Validation of task failed"
}
case api.TaskDriverFailure:
if event.DriverError != "" {
desc = event.DriverError
} else {
desc = "Failed to start task"
}
case api.TaskDownloadingArtifacts:
desc = "Client is downloading artifacts"
case api.TaskArtifactDownloadFailed:
if event.DownloadError != "" {
desc = event.DownloadError
} else {
desc = "Failed to download artifacts"
}
case api.TaskKilling:
if event.KillTimeout != 0 {
desc = fmt.Sprintf("Sent interupt. Waiting %v before force killing", event.KillTimeout)
} else {
desc = "Sent interupt"
}
case api.TaskKilled:
if event.KillError != "" {
desc = event.KillError
} else {
desc = "Task successfully killed"
}
case api.TaskTerminated:
var parts []string
parts = append(parts, fmt.Sprintf("Exit Code: %d", event.ExitCode))
if event.Signal != 0 {
parts = append(parts, fmt.Sprintf("Signal: %d", event.Signal))
}
if event.Message != "" {
parts = append(parts, fmt.Sprintf("Exit Message: %q", event.Message))
}
desc = strings.Join(parts, ", ")
case api.TaskRestarting:
in := fmt.Sprintf("Task restarting in %v", time.Duration(event.StartDelay))
if event.RestartReason != "" && event.RestartReason != client.ReasonWithinPolicy {
desc = fmt.Sprintf("%s - %s", event.RestartReason, in)
} else {
desc = in
}
case api.TaskNotRestarting:
if event.RestartReason != "" {
desc = event.RestartReason
} else {
desc = "Task exceeded restart policy"
}
}
// Reverse order so we are sorted by time
events[size-i] = fmt.Sprintf("%s|%s|%s", formattedTime, event.Type, desc)
}
c.Ui.Output(formatList(events))
}
// outputTaskResources prints the task resources for the passed task and if
// displayStats is set, verbose resource usage statistics
func (c *AllocStatusCommand) outputTaskResources(alloc *api.Allocation, task string, stats *api.AllocResourceUsage, displayStats bool) {
resource, ok := alloc.TaskResources[task]
if !ok {
return
}
c.Ui.Output("Task Resources")
var addr []string
for _, nw := range resource.Networks {
ports := append(nw.DynamicPorts, nw.ReservedPorts...)
for _, port := range ports {
addr = append(addr, fmt.Sprintf("%v: %v:%v\n", port.Label, nw.IP, port.Value))
}
}
var resourcesOutput []string
resourcesOutput = append(resourcesOutput, "CPU|Memory|Disk|IOPS|Addresses")
firstAddr := ""
if len(addr) > 0 {
firstAddr = addr[0]
}
// Display the rolled-up stats. If possible, prefer the live statistics
cpuUsage := strconv.Itoa(resource.CPU)
memUsage := humanize.IBytes(uint64(resource.MemoryMB * bytesPerMegabyte))
if ru, ok := stats.Tasks[task]; ok && ru != nil && ru.ResourceUsage != nil {
if cs := ru.ResourceUsage.CpuStats; cs != nil {
cpuUsage = fmt.Sprintf("%v/%v", math.Floor(cs.TotalTicks), resource.CPU)
}
if ms := ru.ResourceUsage.MemoryStats; ms != nil {
memUsage = fmt.Sprintf("%v/%v", humanize.IBytes(ms.RSS), memUsage)
}
}
resourcesOutput = append(resourcesOutput, fmt.Sprintf("%v MHz|%v|%v|%v|%v",
cpuUsage,
memUsage,
humanize.IBytes(uint64(resource.DiskMB*bytesPerMegabyte)),
resource.IOPS,
firstAddr))
for i := 1; i < len(addr); i++ {
resourcesOutput = append(resourcesOutput, fmt.Sprintf("||||%v", addr[i]))
}
c.Ui.Output(formatListWithSpaces(resourcesOutput))
if ru, ok := stats.Tasks[task]; ok && ru != nil && displayStats && ru.ResourceUsage != nil {
c.Ui.Output("")
c.outputVerboseResourceUsage(task, ru.ResourceUsage)
}
}
// outputVerboseResourceUsage outputs the verbose resource usage for the passed
// task
func (c *AllocStatusCommand) outputVerboseResourceUsage(task string, resourceUsage *api.ResourceUsage) {
memoryStats := resourceUsage.MemoryStats
cpuStats := resourceUsage.CpuStats
if memoryStats != nil && len(memoryStats.Measured) > 0 {
c.Ui.Output("Memory Stats")
// Sort the measured stats
sort.Strings(memoryStats.Measured)
var measuredStats []string
for _, measured := range memoryStats.Measured {
switch measured {
case "RSS":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.RSS))
case "Cache":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Cache))
case "Swap":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.Swap))
case "Max Usage":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.MaxUsage))
case "Kernel Usage":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelUsage))
case "Kernel Max Usage":
measuredStats = append(measuredStats, humanize.IBytes(memoryStats.KernelMaxUsage))
}
}
out := make([]string, 2)
out[0] = strings.Join(memoryStats.Measured, "|")
out[1] = strings.Join(measuredStats, "|")
c.Ui.Output(formatList(out))
c.Ui.Output("")
}
if cpuStats != nil && len(cpuStats.Measured) > 0 {
c.Ui.Output("CPU Stats")
// Sort the measured stats
sort.Strings(cpuStats.Measured)
var measuredStats []string
for _, measured := range cpuStats.Measured {
switch measured {
case "Percent":
percent := strconv.FormatFloat(cpuStats.Percent, 'f', 2, 64)
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
case "Throttled Periods":
measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledPeriods))
case "Throttled Time":
measuredStats = append(measuredStats, fmt.Sprintf("%v", cpuStats.ThrottledTime))
case "User Mode":
percent := strconv.FormatFloat(cpuStats.UserMode, 'f', 2, 64)
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
case "System Mode":
percent := strconv.FormatFloat(cpuStats.SystemMode, 'f', 2, 64)
measuredStats = append(measuredStats, fmt.Sprintf("%v%%", percent))
}
}
out := make([]string, 2)
out[0] = strings.Join(cpuStats.Measured, "|")
out[1] = strings.Join(measuredStats, "|")
c.Ui.Output(formatList(out))
}
}
// shortTaskStatus prints out the current state of each task.
func (c *AllocStatusCommand) shortTaskStatus(alloc *api.Allocation) {
tasks := make([]string, 0, len(alloc.TaskStates)+1)
tasks = append(tasks, "Name|State|Last Event|Time")
for task := range c.sortedTaskStateIterator(alloc.TaskStates) {
state := alloc.TaskStates[task]
lastState := state.State
var lastEvent, lastTime string
l := len(state.Events)
if l != 0 {
last := state.Events[l-1]
lastEvent = last.Type
lastTime = formatUnixNanoTime(last.Time)
}
tasks = append(tasks, fmt.Sprintf("%s|%s|%s|%s",
task, lastState, lastEvent, lastTime))
}
c.Ui.Output(c.Colorize().Color("\n[bold]Tasks[reset]"))
c.Ui.Output(formatList(tasks))
}
// sortedTaskStateIterator is a helper that takes the task state map and returns a
// channel that returns the keys in a sorted order.
func (c *AllocStatusCommand) sortedTaskStateIterator(m map[string]*api.TaskState) <-chan string {
output := make(chan string, len(m))
keys := make([]string, len(m))
i := 0
for k := range m {
keys[i] = k
i++
}
sort.Strings(keys)
for _, key := range keys {
output <- key
}
close(output)
return output
}

View File

@ -1,135 +0,0 @@
package command
import (
"fmt"
"reflect"
"strconv"
"strings"
"time"
)
const (
HealthCritical = 2
HealthWarn = 1
HealthPass = 0
HealthUnknown = 3
)
type AgentCheckCommand struct {
Meta
}
func (c *AgentCheckCommand) Help() string {
helpText := `
Usage: nomad check
Display the state of the Nomad agent. The exit code of the command is Nagios
compatible and can be used with alerting systems.
General Options:
` + generalOptionsUsage() + `
Agent Check Options:
-min-peers
Minimum number of peers that a server is expected to know.
-min-servers
Minimum number of servers that a client is expected to know.
`
return strings.TrimSpace(helpText)
}
func (c *AgentCheckCommand) Synopsis() string {
return "Displays health of the local Nomad agent"
}
func (c *AgentCheckCommand) Run(args []string) int {
var minPeers, minServers int
flags := c.Meta.FlagSet("check", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.IntVar(&minPeers, "min-peers", 0, "")
flags.IntVar(&minServers, "min-servers", 1, "")
if err := flags.Parse(args); err != nil {
return 1
}
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("error initializing client: %s", err))
return HealthCritical
}
info, err := client.Agent().Self()
if err != nil {
c.Ui.Output(fmt.Sprintf("unable to query agent info: %v", err))
return HealthCritical
}
if stats, ok := info["stats"]; !ok && (reflect.TypeOf(stats).Kind() == reflect.Map) {
c.Ui.Error("error getting stats from the agent api")
return 1
}
if _, ok := info["stats"]["nomad"]; ok {
return c.checkServerHealth(info["stats"], minPeers)
}
if _, ok := info["stats"]["client"]; ok {
return c.checkClientHealth(info["stats"], minServers)
}
return HealthWarn
}
// checkServerHealth returns the health of a server.
// TODO Add more rules for determining server health
func (c *AgentCheckCommand) checkServerHealth(info map[string]interface{}, minPeers int) int {
raft := info["raft"].(map[string]interface{})
knownPeers, err := strconv.Atoi(raft["num_peers"].(string))
if err != nil {
c.Ui.Output(fmt.Sprintf("unable to get known peers: %v", err))
return HealthCritical
}
if knownPeers < minPeers {
c.Ui.Output(fmt.Sprintf("known peers: %v, is less than expected number of peers: %v", knownPeers, minPeers))
return HealthCritical
}
return HealthPass
}
// checkClientHealth returns the health of a client
func (c *AgentCheckCommand) checkClientHealth(info map[string]interface{}, minServers int) int {
clientStats := info["client"].(map[string]interface{})
knownServers, err := strconv.Atoi(clientStats["known_servers"].(string))
if err != nil {
c.Ui.Output(fmt.Sprintf("unable to get known servers: %v", err))
return HealthCritical
}
heartbeatTTL, err := time.ParseDuration(clientStats["heartbeat_ttl"].(string))
if err != nil {
c.Ui.Output(fmt.Sprintf("unable to parse heartbeat TTL: %v", err))
return HealthCritical
}
lastHeartbeat, err := time.ParseDuration(clientStats["last_heartbeat"].(string))
if err != nil {
c.Ui.Output(fmt.Sprintf("unable to parse last heartbeat: %v", err))
return HealthCritical
}
if lastHeartbeat > heartbeatTTL {
c.Ui.Output(fmt.Sprintf("last heartbeat was %q time ago, expected heartbeat ttl: %q", lastHeartbeat, heartbeatTTL))
return HealthCritical
}
if knownServers < minServers {
c.Ui.Output(fmt.Sprintf("known servers: %v, is less than expected number of servers: %v", knownServers, minServers))
return HealthCritical
}
return HealthPass
}

View File

@ -1,110 +0,0 @@
package command
import (
"fmt"
"strings"
)
type ClientConfigCommand struct {
Meta
}
func (c *ClientConfigCommand) Help() string {
helpText := `
Usage: nomad client-config [options]
View or modify client configuration details. This command only
works on client nodes, and can be used to update the supported
configuration options of a running client.
The arguments behave differently depending on the flags given.
See each flag's description for its specific requirements.
General Options:
` + generalOptionsUsage() + `
Client Config Options:
-servers
List the known server addresses of the client node. Client
nodes do not participate in the gossip pool, and instead
register with these servers periodically over the network.
-update-servers
Updates the client's server list using the provided
arguments. Multiple server addresses may be passed using
multiple arguments. IMPORTANT: When updating the servers
list, you must specify ALL of the server nodes you wish
to configure. The set is updated atomically.
Example:
$ nomad client-config -update-servers foo:4647 bar:4647
`
return strings.TrimSpace(helpText)
}
func (c *ClientConfigCommand) Synopsis() string {
return "View or modify client configuration details"
}
func (c *ClientConfigCommand) Run(args []string) int {
var listServers, updateServers bool
flags := c.Meta.FlagSet("client-servers", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&listServers, "servers", false, "")
flags.BoolVar(&updateServers, "update-servers", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
// Check the flags for misuse
if !listServers && !updateServers {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
if updateServers {
// Get the server addresses
if len(args) == 0 {
c.Ui.Error(c.Help())
return 1
}
// Set the servers list
if err := client.Agent().SetServers(args); err != nil {
c.Ui.Error(fmt.Sprintf("Error updating server list: %s", err))
return 1
}
c.Ui.Output(fmt.Sprint("Updated server list"))
return 0
}
if listServers {
// Query the current server list
servers, err := client.Agent().Servers()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying server list: %s", err))
return 1
}
// Print the results
for _, server := range servers {
c.Ui.Output(server)
}
return 0
}
// Should not make it this far
return 1
}

View File

@ -1,65 +0,0 @@
package command
import (
"bytes"
"encoding/json"
"fmt"
"io"
"text/template"
)
// DataFormatter transforms data into a formatted string.
type DataFormatter interface {
// TransformData should return transformed string data.
TransformData(interface{}) (string, error)
}
// DataFormat returns the data formatter for the specified format.
func DataFormat(format, tmpl string) (DataFormatter, error) {
switch format {
case "json":
if len(tmpl) > 0 {
return nil, fmt.Errorf("json format does not support template option.")
}
return &JSONFormat{}, nil
case "template":
return &TemplateFormat{tmpl}, nil
}
return nil, fmt.Errorf("Unsupported format is specified.")
}
type JSONFormat struct {
}
// TransformData returns JSON format string data.
func (p *JSONFormat) TransformData(data interface{}) (string, error) {
out, err := json.MarshalIndent(&data, "", " ")
if err != nil {
return "", err
}
return string(out), nil
}
type TemplateFormat struct {
tmpl string
}
// TransformData returns template format string data.
func (p *TemplateFormat) TransformData(data interface{}) (string, error) {
var out io.Writer = new(bytes.Buffer)
if len(p.tmpl) == 0 {
return "", fmt.Errorf("template needs to be specified the golang templates.")
}
t, err := template.New("format").Parse(p.tmpl)
if err != nil {
return "", err
}
err = t.Execute(out, data)
if err != nil {
return "", err
}
return fmt.Sprint(out), nil
}
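// Example (illustrative sketch, not part of the original source): selecting a
// formatter by name and applying it to arbitrary data. The template string
// and data value below are assumptions for illustration.
//
//	f, err := DataFormat("template", "{{.Name}}")
//	if err != nil {
//		// handle the error
//	}
//	out, err := f.TransformData(struct{ Name string }{Name: "example"})
//	// out == "example"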

View File

@ -1,272 +0,0 @@
package command
import (
"fmt"
"sort"
"strings"
"github.com/hashicorp/nomad/api"
)
type EvalStatusCommand struct {
Meta
}
func (c *EvalStatusCommand) Help() string {
helpText := `
Usage: nomad eval-status [options] <evaluation-id>
Display information about evaluations. This command can be used to inspect the
current status of an evaluation as well as determine the reason an evaluation
did not place all allocations.
General Options:
` + generalOptionsUsage() + `
Eval Status Options:
-monitor
Monitor an outstanding evaluation
-verbose
Show full information.
-json
Output the evaluation in its JSON format.
-t
Format and display evaluation using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *EvalStatusCommand) Synopsis() string {
return "Display evaluation status and placement failure reasons"
}
func (c *EvalStatusCommand) Run(args []string) int {
var monitor, verbose, json bool
var tmpl string
flags := c.Meta.FlagSet("eval-status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&monitor, "monitor", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&json, "json", false, "")
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one evaluation ID
args = flags.Args()
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// If no evaluation ID was given but an output format was specified, format and output the full evaluation list
if len(args) == 0 {
var format string
if json && len(tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if json {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
evals, _, err := client.Evaluations().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying evaluations: %v", err))
return 1
}
// Return nothing if no evaluations found
if len(evals) == 0 {
return 0
}
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(evals)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
}
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
evalID := args[0]
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Query the allocation info
if len(evalID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := client.Evaluations().PrefixList(evalID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying evaluation: %v", err))
return 1
}
if len(evals) == 0 {
c.Ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evals
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Triggered By|Status|Placement Failures"
for i, eval := range evals {
failures, _ := evalFailureStatus(eval)
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, length),
eval.Priority,
eval.TriggeredBy,
eval.Status,
failures,
)
}
c.Ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// If we are in monitor mode, monitor and exit
if monitor {
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evals[0].ID, true)
}
// Prefix lookup matched a single evaluation
eval, _, err := client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying evaluation: %s", err))
return 1
}
// If output format is specified, format and output the data
var format string
if json {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(eval)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
failureString, failures := evalFailureStatus(eval)
triggerNoun, triggerSubj := getTriggerDetails(eval)
statusDesc := eval.StatusDescription
if statusDesc == "" {
statusDesc = eval.Status
}
// Format the evaluation data
basic := []string{
fmt.Sprintf("ID|%s", limit(eval.ID, length)),
fmt.Sprintf("Status|%s", eval.Status),
fmt.Sprintf("Status Description|%s", statusDesc),
fmt.Sprintf("Type|%s", eval.Type),
fmt.Sprintf("TriggeredBy|%s", eval.TriggeredBy),
fmt.Sprintf("%s|%s", triggerNoun, triggerSubj),
fmt.Sprintf("Priority|%d", eval.Priority),
fmt.Sprintf("Placement Failures|%s", failureString),
}
if verbose {
// NextEval, PreviousEval, BlockedEval
basic = append(basic,
fmt.Sprintf("Previous Eval|%s", eval.PreviousEval),
fmt.Sprintf("Next Eval|%s", eval.NextEval),
fmt.Sprintf("Blocked Eval|%s", eval.BlockedEval))
}
c.Ui.Output(formatKV(basic))
if failures {
c.Ui.Output(c.Colorize().Color("\n[bold]Failed Placements[reset]"))
sorted := sortedTaskGroupFromMetrics(eval.FailedTGAllocs)
for _, tg := range sorted {
metrics := eval.FailedTGAllocs[tg]
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
c.Ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
c.Ui.Output(formatAllocMetrics(metrics, false, " "))
c.Ui.Output("")
}
if eval.BlockedEval != "" {
c.Ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, length)))
}
}
return 0
}
func sortedTaskGroupFromMetrics(groups map[string]*api.AllocationMetric) []string {
tgs := make([]string, 0, len(groups))
for tg := range groups {
tgs = append(tgs, tg)
}
sort.Strings(tgs)
return tgs
}
func getTriggerDetails(eval *api.Evaluation) (noun, subject string) {
switch eval.TriggeredBy {
case "job-register", "job-deregister", "periodic-job", "rolling-update":
return "Job ID", eval.JobID
case "node-update":
return "Node ID", eval.NodeID
case "max-plan-attempts":
return "Previous Eval", eval.PreviousEval
default:
return "", ""
}
}
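The prefix handling above (at least two characters, trimmed to an even length) recurs in several commands in this package; a sketch of how it could be factored out, with sanitizeUUIDPrefix being an illustrative name rather than an existing helper:

// sanitizeUUIDPrefix mirrors the inline checks above: IDs are hex-encoded
// UUIDs, so a usable prefix needs at least two characters and an even length.
func sanitizeUUIDPrefix(prefix string) (string, error) {
    if len(prefix) == 1 {
        return "", fmt.Errorf("identifier must contain at least two characters")
    }
    if len(prefix)%2 == 1 {
        // Trim the trailing character so the prefix maps onto whole bytes.
        prefix = prefix[:len(prefix)-1]
    }
    return prefix, nil
}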


@ -1,43 +0,0 @@
package command
import (
"os"
"strings"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/driver"
)
type ExecutorPluginCommand struct {
Meta
}
func (e *ExecutorPluginCommand) Help() string {
helpText := `
This is a command used by Nomad internally to launch an executor plugin.
`
return strings.TrimSpace(helpText)
}
func (e *ExecutorPluginCommand) Synopsis() string {
return "internal - launch an executor plugin"
}
func (e *ExecutorPluginCommand) Run(args []string) int {
if len(args) == 0 {
e.Ui.Error("log output file isn't provided")
return 1
}
logFileName := args[0]
stdo, err := os.OpenFile(logFileName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
if err != nil {
e.Ui.Error(err.Error())
return 1
}
plugin.Serve(&plugin.ServeConfig{
HandshakeConfig: driver.HandshakeConfig,
Plugins: driver.GetPluginMap(stdo),
})
return 0
}
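For context, the consuming side launches this command as a go-plugin subprocess and dispenses the executor over RPC. A hedged sketch, assuming Nomad's internal driver package; the "executor" plugin name and the log path are illustrative assumptions:

package main

import (
    "log"
    "os"
    "os/exec"

    plugin "github.com/hashicorp/go-plugin"
    "github.com/hashicorp/nomad/client/driver"
)

func main() {
    // Launch `nomad executor <logfile>` and connect to it over go-plugin.
    // The plugin name and log path below are illustrative assumptions.
    client := plugin.NewClient(&plugin.ClientConfig{
        HandshakeConfig: driver.HandshakeConfig,
        Plugins:         driver.GetPluginMap(os.Stdout),
        Cmd:             exec.Command("nomad", "executor", "/tmp/executor.log"),
    })
    defer client.Kill()

    rpcClient, err := client.Client()
    if err != nil {
        log.Fatalf("error connecting to executor plugin: %v", err)
    }
    if _, err := rpcClient.Dispense("executor"); err != nil {
        log.Fatalf("error dispensing executor: %v", err)
    }
}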


@ -1,369 +0,0 @@
package command
import (
"fmt"
"io"
"math/rand"
"os"
"os/signal"
"strings"
"syscall"
"time"
humanize "github.com/dustin/go-humanize"
"github.com/hashicorp/nomad/api"
)
const (
// bytesToLines is an estimation of how many bytes are in each log line.
// This is used to set the offset to read from when a user specifies how
// many lines to tail from.
bytesToLines int64 = 120
// defaultTailLines is the number of lines to tail by default if the value
// is not overridden.
defaultTailLines int64 = 10
)
type FSCommand struct {
Meta
}
func (f *FSCommand) Help() string {
helpText := `
Usage: nomad fs <alloc-id> <path>
fs displays either the contents of an allocation directory for the passed allocation,
or the file at the given path. The path is relative to the root of the alloc
dir and defaults to root if unspecified.
General Options:
` + generalOptionsUsage() + `
FS Specific Options:
-H
Machine friendly output.
-verbose
Show full information.
-job <job-id>
Use a random allocation from the specified job ID.
-stat
Show file stat information instead of displaying the file, or listing the directory.
-f
Causes the output to not stop when the end of the file is reached, but rather to
wait for additional output.
-tail
Show the file's contents with offsets relative to the end of the file. If no
offset is given, -n is defaulted to 10.
-n
Sets the tail location as a best-effort number of lines relative to the end
of the file.
-c
Sets the tail location in number of bytes relative to the end of the file.
`
return strings.TrimSpace(helpText)
}
func (f *FSCommand) Synopsis() string {
return "Inspect the contents of an allocation directory"
}
func (f *FSCommand) Run(args []string) int {
var verbose, machine, job, stat, tail, follow bool
var numLines, numBytes int64
flags := f.Meta.FlagSet("fs", FlagSetClient)
flags.Usage = func() { f.Ui.Output(f.Help()) }
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&machine, "H", false, "")
flags.BoolVar(&job, "job", false, "")
flags.BoolVar(&stat, "stat", false, "")
flags.BoolVar(&follow, "f", false, "")
flags.BoolVar(&tail, "tail", false, "")
flags.Int64Var(&numLines, "n", -1, "")
flags.Int64Var(&numBytes, "c", -1, "")
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
if len(args) < 1 {
if job {
f.Ui.Error("job ID is required")
} else {
f.Ui.Error("allocation ID is required")
}
return 1
}
if len(args) > 2 {
f.Ui.Error(f.Help())
return 1
}
path := "/"
if len(args) == 2 {
path = args[1]
}
client, err := f.Meta.Client()
if err != nil {
f.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
return 1
}
// If -job is specified, use random allocation, otherwise use provided allocation
allocID := args[0]
if job {
allocID, err = getRandomJobAlloc(client, args[0])
if err != nil {
f.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
return 1
}
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Query the allocation info
if len(allocID) == 1 {
f.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
return 1
}
if len(allocID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
allocID = allocID[:len(allocID)-1]
}
allocs, _, err := client.Allocations().PrefixList(allocID)
if err != nil {
f.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
return 1
}
if len(allocs) == 0 {
f.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
return 1
}
if len(allocs) > 1 {
// Format the allocs
out := make([]string, len(allocs)+1)
out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
for i, alloc := range allocs {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
limit(alloc.ID, length),
limit(alloc.EvalID, length),
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus,
)
}
f.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single allocation
alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
if err != nil {
f.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
return 1
}
// Get file stat info
file, _, err := client.AllocFS().Stat(alloc, path, nil)
if err != nil {
f.Ui.Error(err.Error())
return 1
}
// If we want file stats, print those and exit.
if stat {
// Display the file information
out := make([]string, 2)
out[0] = "Mode|Size|Modified Time|Name"
if file != nil {
fn := file.Name
if file.IsDir {
fn = fmt.Sprintf("%s/", fn)
}
var size string
if machine {
size = fmt.Sprintf("%d", file.Size)
} else {
size = humanize.IBytes(uint64(file.Size))
}
out[1] = fmt.Sprintf("%s|%s|%s|%s", file.FileMode, size,
formatTime(file.ModTime), fn)
}
f.Ui.Output(formatList(out))
return 0
}
// Determine if the path is a file or a directory.
if file.IsDir {
// We have a directory, list it.
files, _, err := client.AllocFS().List(alloc, path, nil)
if err != nil {
f.Ui.Error(fmt.Sprintf("Error listing alloc dir: %s", err))
return 1
}
// Display the file information in a tabular format
out := make([]string, len(files)+1)
out[0] = "Mode|Size|Modified Time|Name"
for i, file := range files {
fn := file.Name
if file.IsDir {
fn = fmt.Sprintf("%s/", fn)
}
var size string
if machine {
size = fmt.Sprintf("%d", file.Size)
} else {
size = humanize.IBytes(uint64(file.Size))
}
out[i+1] = fmt.Sprintf("%s|%s|%s|%s",
file.FileMode,
size,
formatTime(file.ModTime),
fn,
)
}
f.Ui.Output(formatList(out))
return 0
}
// We have a file, output it.
var r io.ReadCloser
var readErr error
if !tail {
if follow {
r, readErr = f.followFile(client, alloc, path, api.OriginStart, 0, -1)
} else {
r, readErr = client.AllocFS().Cat(alloc, path, nil)
}
if readErr != nil {
readErr = fmt.Errorf("Error reading file: %v", readErr)
}
} else {
// Parse the offset
var offset int64 = defaultTailLines * bytesToLines
if nLines, nBytes := numLines != -1, numBytes != -1; nLines && nBytes {
f.Ui.Error("Both -n and -c are not allowed")
return 1
} else if numLines < -1 || numBytes < -1 {
f.Ui.Error("Invalid size is specified")
return 1
} else if nLines {
offset = numLines * bytesToLines
} else if nBytes {
offset = numBytes
} else {
numLines = defaultTailLines
}
if offset > file.Size {
offset = file.Size
}
if follow {
r, readErr = f.followFile(client, alloc, path, api.OriginEnd, offset, numLines)
} else {
// ReadAt takes an offset from the start of the file, whereas follow mode
// uses an offset relative to the end.
offset = file.Size - offset
r, readErr = client.AllocFS().ReadAt(alloc, path, offset, -1, nil)
// If numLines is set, wrap the reader
if numLines != -1 {
r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second)
}
}
if readErr != nil {
readErr = fmt.Errorf("Error tailing file: %v", readErr)
}
}
if readErr != nil {
f.Ui.Error(readErr.Error())
return 1
}
defer r.Close()
io.Copy(os.Stdout, r)
return 0
}
// followFile returns a reader that streams the file's contents relative to the
// given origin and offset. If numLines does not equal -1, tail -n behavior is used.
func (f *FSCommand) followFile(client *api.Client, alloc *api.Allocation,
path, origin string, offset, numLines int64) (io.ReadCloser, error) {
cancel := make(chan struct{})
frames, err := client.AllocFS().Stream(alloc, path, origin, offset, cancel, nil)
if err != nil {
return nil, err
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// Create a reader
var r io.ReadCloser
frameReader := api.NewFrameReader(frames, cancel)
frameReader.SetUnblockTime(500 * time.Millisecond)
r = frameReader
// If numLines is set, wrap the reader
if numLines != -1 {
r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second)
}
go func() {
<-signalCh
// End the streaming
r.Close()
}()
return r, nil
}
// getRandomJobAlloc returns a random allocation ID for the given job ID. It
// prefers a running allocation, but falls back to a dead allocation if none are running.
func getRandomJobAlloc(client *api.Client, jobID string) (string, error) {
var runningAllocs []*api.AllocationListStub
allocs, _, err := client.Jobs().Allocations(jobID, nil)
if err != nil {
return "", fmt.Errorf("error querying allocations for job %q: %v", jobID, err)
}
// Check that the job actually has allocations
if len(allocs) == 0 {
return "", fmt.Errorf("job %q doesn't exist or it has no allocations", jobID)
}
for _, v := range allocs {
if v.ClientStatus == "running" {
runningAllocs = append(runningAllocs, v)
}
}
// If we don't have any allocations running, use dead allocations
if len(runningAllocs) < 1 {
runningAllocs = allocs
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
allocID := runningAllocs[r.Intn(len(runningAllocs))].ID
return allocID, err
}
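The tail offset used above is an estimate rather than an exact line count; a small sketch of the calculation, assuming the bytesToLines constant defined at the top of this file (the helper name is illustrative):

// tailOffset estimates how many bytes to read back from the end of a file in
// order to show roughly the requested number of lines, capped at the file size.
func tailOffset(requestedLines, fileSize int64) int64 {
    offset := requestedLines * bytesToLines // ~120 bytes assumed per line
    if offset > fileSize {
        offset = fileSize
    }
    return offset
}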


@ -1,290 +0,0 @@
package command
import (
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"strconv"
"time"
gg "github.com/hashicorp/go-getter"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/jobspec"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/ryanuber/columnize"
)
// formatKV takes a set of strings and formats them into properly
// aligned k = v pairs using the columnize library.
func formatKV(in []string) string {
columnConf := columnize.DefaultConfig()
columnConf.Empty = "<none>"
columnConf.Glue = " = "
return columnize.Format(in, columnConf)
}
// formatList takes a set of strings and formats them into properly
// aligned output, replacing any blank fields with a placeholder
// for awk-ability.
func formatList(in []string) string {
columnConf := columnize.DefaultConfig()
columnConf.Empty = "<none>"
return columnize.Format(in, columnConf)
}
// formatListWithSpaces takes a set of strings and formats them into properly
// aligned output. It should be used sparingly since it doesn't replace empty
// values and hence not awk/sed friendly
func formatListWithSpaces(in []string) string {
columnConf := columnize.DefaultConfig()
return columnize.Format(in, columnConf)
}
// limit truncates the string to at most the given length.
func limit(s string, length int) string {
if len(s) < length {
return s
}
return s[:length]
}
// formatTime formats the time to string based on RFC822
func formatTime(t time.Time) string {
return t.Format("01/02/06 15:04:05 MST")
}
// formatUnixNanoTime is a helper for formatting time for output.
func formatUnixNanoTime(nano int64) string {
t := time.Unix(0, nano)
return formatTime(t)
}
// formatTimeDifference takes two times and determines their duration difference
// truncating to a passed unit.
// E.g. formatTimeDifference(first=1m22s33ms, second=1m28s55ms, time.Second) -> 6s
func formatTimeDifference(first, second time.Time, d time.Duration) string {
return second.Truncate(d).Sub(first.Truncate(d)).String()
}
// getLocalNodeID returns the node ID of the local Nomad Client and an error if
// it couldn't be determined or the Agent is not running in Client mode.
func getLocalNodeID(client *api.Client) (string, error) {
info, err := client.Agent().Self()
if err != nil {
return "", fmt.Errorf("Error querying agent info: %s", err)
}
var stats map[string]interface{}
stats, _ = info["stats"]
clientStats, ok := stats["client"].(map[string]interface{})
if !ok {
return "", fmt.Errorf("Nomad not running in client mode")
}
nodeID, ok := clientStats["node_id"].(string)
if !ok {
return "", fmt.Errorf("Failed to determine node ID")
}
return nodeID, nil
}
// evalFailureStatus returns whether the evaluation has failures and a string to
// display when presenting users with whether there are failures for the eval
func evalFailureStatus(eval *api.Evaluation) (string, bool) {
if eval == nil {
return "", false
}
hasFailures := len(eval.FailedTGAllocs) != 0
text := strconv.FormatBool(hasFailures)
if eval.Status == "blocked" {
text = "N/A - In Progress"
}
return text, hasFailures
}
// LineLimitReader wraps another reader and provides `tail -n` like behavior.
// LineLimitReader buffers up to the searchLimit and returns `-n` number of
// lines. After those lines have been returned, LineLimitReader streams the
// underlying ReadCloser
type LineLimitReader struct {
io.ReadCloser
lines int
searchLimit int
timeLimit time.Duration
lastRead time.Time
buffer *bytes.Buffer
bufFilled bool
foundLines bool
}
// NewLineLimitReader takes the ReadCloser to wrap, the number of lines to find
// searching backwards in the first searchLimit bytes. timeLimit can optionally
// be specified by passing a non-zero duration. When set, the search for the
// last n lines is aborted if no data has been read in the duration. This
// can be used to flush what has been buffered if no extra data is being received. When
// used, the underlying reader must not block forever and must periodically
// unblock even when no data has been read.
func NewLineLimitReader(r io.ReadCloser, lines, searchLimit int, timeLimit time.Duration) *LineLimitReader {
return &LineLimitReader{
ReadCloser: r,
searchLimit: searchLimit,
timeLimit: timeLimit,
lines: lines,
buffer: bytes.NewBuffer(make([]byte, 0, searchLimit)),
}
}
func (l *LineLimitReader) Read(p []byte) (n int, err error) {
// Fill up the buffer so we can find the correct number of lines.
if !l.bufFilled {
b := make([]byte, len(p))
n, err := l.ReadCloser.Read(b)
if n > 0 {
if _, err := l.buffer.Write(b[:n]); err != nil {
return 0, err
}
}
if err != nil {
if err != io.EOF {
return 0, err
}
l.bufFilled = true
goto READ
}
if l.buffer.Len() >= l.searchLimit {
l.bufFilled = true
goto READ
}
if l.timeLimit.Nanoseconds() > 0 {
if l.lastRead.IsZero() {
l.lastRead = time.Now()
return 0, nil
}
now := time.Now()
if n == 0 {
// Nothing new was read; flush the buffer if the time limit has elapsed
if l.lastRead.Add(l.timeLimit).Before(now) {
l.bufFilled = true
goto READ
} else {
return 0, nil
}
} else {
l.lastRead = now
}
}
return 0, nil
}
READ:
if l.bufFiled && l.buffer.Len() != 0 {
b := l.buffer.Bytes()
// Find the lines
if !l.foundLines {
found := 0
i := len(b) - 1
sep := byte('\n')
lastIndex := len(b) - 1
for ; found < l.lines && i >= 0; i-- {
if b[i] == sep {
lastIndex = i
// Skip the first one
if i != len(b)-1 {
found++
}
}
}
// We found them all
if found == l.lines {
// Clear the buffer until the last index
l.buffer.Next(lastIndex + 1)
}
l.foundLines = true
}
// Read from the buffer
n := copy(p, l.buffer.Next(len(p)))
return n, nil
}
// Just stream from the underlying reader now
return l.ReadCloser.Read(p)
}
type JobGetter struct {
// The fields below can be overwritten for tests
testStdin io.Reader
}
// StructJob returns the Job struct from jobfile.
func (j *JobGetter) StructJob(jpath string) (*structs.Job, error) {
var jobfile io.Reader
switch jpath {
case "-":
if j.testStdin != nil {
jobfile = j.testStdin
} else {
jobfile = os.Stdin
}
default:
if len(jpath) == 0 {
return nil, fmt.Errorf("Error jobfile path has to be specified.")
}
job, err := ioutil.TempFile("", "jobfile")
if err != nil {
return nil, err
}
defer os.Remove(job.Name())
// Get the pwd
pwd, err := os.Getwd()
if err != nil {
return nil, err
}
client := &gg.Client{
Src: jpath,
Pwd: pwd,
Dst: job.Name(),
}
if err := client.Get(); err != nil {
return nil, fmt.Errorf("Error getting jobfile from %q: %v", jpath, err)
} else {
file, err := os.Open(job.Name())
if err != nil {
return nil, fmt.Errorf("Error opening file %q: %v", jpath, err)
}
defer file.Close()
jobfile = file
}
}
// Parse the JobFile
jobStruct, err := jobspec.Parse(jobfile)
if err != nil {
fmt.Errorf("Error parsing job file from %s: %v", jpath, err)
return nil, err
}
return jobStruct, nil
}
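A minimal sketch of wrapping a reader with the LineLimitReader defined above to get tail -n style output, assuming the command package plus io/ioutil, strings and time imports; the in-memory string stands in for an allocation log stream and the function name is illustrative:

func exampleLineLimit() (string, error) {
    logs := "line one\nline two\nline three\nline four\n"
    src := ioutil.NopCloser(strings.NewReader(logs))

    // Keep only the last two lines, searching at most 1 KiB back and flushing
    // if no new data arrives for a second.
    r := NewLineLimitReader(src, 2, 1024, time.Second)
    defer r.Close()

    out, err := ioutil.ReadAll(r)
    if err != nil {
        return "", err
    }
    return string(out), nil // "line three\nline four\n"
}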


@ -1,184 +0,0 @@
package command
import (
"fmt"
"io/ioutil"
"os"
"strings"
)
const (
// DefaultInitName is the default name we use when
// initializing the example file
DefaultInitName = "example.nomad"
)
// InitCommand generates a new job template that you can customize to your
// liking, like vagrant init
type InitCommand struct {
Meta
}
func (c *InitCommand) Help() string {
helpText := `
Usage: nomad init
Creates an example job file that can be used as a starting
point to customize further.
`
return strings.TrimSpace(helpText)
}
func (c *InitCommand) Synopsis() string {
return "Create an example job file"
}
func (c *InitCommand) Run(args []string) int {
// Check for misuse
if len(args) != 0 {
c.Ui.Error(c.Help())
return 1
}
// Check if the file already exists
_, err := os.Stat(DefaultInitName)
if err != nil && !os.IsNotExist(err) {
c.Ui.Error(fmt.Sprintf("Failed to stat '%s': %v", DefaultInitName, err))
return 1
}
if !os.IsNotExist(err) {
c.Ui.Error(fmt.Sprintf("Job '%s' already exists", DefaultInitName))
return 1
}
// Write out the example
err = ioutil.WriteFile(DefaultInitName, []byte(defaultJob), 0660)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to write '%s': %v", DefaultInitName, err))
return 1
}
// Success
c.Ui.Output(fmt.Sprintf("Example job file written to %s", DefaultInitName))
return 0
}
var defaultJob = strings.TrimSpace(`
# There can only be a single job definition per file.
# Create a job with ID and Name 'example'
job "example" {
# Run the job in the global region, which is the default.
# region = "global"
# Specify the datacenters within the region this job can run in.
datacenters = ["dc1"]
# Service type jobs optimize for long-lived services. This is
# the default but we can change to batch for short-lived tasks.
# type = "service"
# Priority controls our access to resources and scheduling priority.
# This can be 1 to 100, inclusive, and defaults to 50.
# priority = 50
# Restrict our job to only linux. We can specify multiple
# constraints as needed.
constraint {
attribute = "${attr.kernel.name}"
value = "linux"
}
# Configure the job to do rolling updates
update {
# Stagger updates every 10 seconds
stagger = "10s"
# Update a single task at a time
max_parallel = 1
}
# Create a 'cache' group. Each task in the group will be
# scheduled onto the same machine.
group "cache" {
# Control the number of instances of this group.
# Defaults to 1
# count = 1
# Configure the restart policy for the task group. If not provided, a
# default is used based on the job type.
restart {
# The number of attempts to run the job within the specified interval.
attempts = 10
interval = "5m"
# A delay between a task failing and a restart occurring.
delay = "25s"
# Mode controls what happens when a task has restarted "attempts"
# times within the interval. "delay" mode delays the next restart
# till the next interval. "fail" mode does not restart the task if
# "attempts" has been hit within the interval.
mode = "delay"
}
# Define a task to run
task "redis" {
# Use Docker to run the task.
driver = "docker"
# Configure Docker driver with the image
config {
image = "redis:latest"
port_map {
db = 6379
}
}
service {
name = "${TASKGROUP}-redis"
tags = ["global", "cache"]
port = "db"
check {
name = "alive"
type = "tcp"
interval = "10s"
timeout = "2s"
}
}
# We must specify the resources required for
# this task to ensure it runs on a machine with
# enough capacity.
resources {
cpu = 500 # 500 MHz
memory = 256 # 256MB
network {
mbits = 10
port "db" {
}
}
}
# The artifact block can be specified one or more times to download
# artifacts prior to the task being started. This is convenient for
# shipping configs or data needed by the task.
# artifact {
# source = "http://foo.com/artifact.tar.gz"
# options {
# checksum = "md5:c4aa853ad2215426eb7d70a21922e794"
# }
# }
# Specify configuration related to log rotation
# logs {
# max_files = 10
# max_file_size = 15
# }
# Controls the timeout between signalling a task it will be killed
# and killing the task. If not set a default is used.
# kill_timeout = "20s"
}
}
}
`)
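The generated file round-trips through the same parser used by the run, plan and validate commands; a small sketch, assuming the command package plus fmt, strings and the jobspec import used elsewhere in this package (the function name is illustrative):

func exampleParseDefaultJob() error {
    // defaultJob is the HCL document written out by `nomad init` above.
    job, err := jobspec.Parse(strings.NewReader(defaultJob))
    if err != nil {
        return fmt.Errorf("error parsing example job: %v", err)
    }
    fmt.Printf("parsed job %q with %d task group(s)\n", job.ID, len(job.TaskGroups))
    return nil
}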


@ -1,169 +0,0 @@
package command
import (
"encoding/json"
"fmt"
"strings"
"github.com/hashicorp/nomad/api"
)
type InspectCommand struct {
Meta
}
func (c *InspectCommand) Help() string {
helpText := `
Usage: nomad inspect [options] <job>
Inspect is used to see the specification of a submitted job.
General Options:
` + generalOptionsUsage() + `
Inspect Options:
-json
Output the job in its JSON format.
-t
Format and display the job using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *InspectCommand) Synopsis() string {
return "Inspect a submitted job"
}
func (c *InspectCommand) Run(args []string) int {
var ojson bool
var tmpl string
flags := c.Meta.FlagSet("inspect", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&ojson, "json", false, "")
flags.StringVar(&tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// If no job ID was given but an output format was specified, format and output the full job list
if len(args) == 0 {
var format string
if ojson && len(tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if ojson {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
jobs, _, err := client.Jobs().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying jobs: %v", err))
return 1
}
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
// Return nothing if no jobs found
if len(jobs) == 0 {
return 0
}
out, err := f.TransformData(jobs)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
}
// Check that we got exactly one job
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
jobID := args[0]
// Check if the job exists
jobs, _, err := client.Jobs().PrefixList(jobID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error inspecting job: %s", err))
return 1
}
if len(jobs) == 0 {
c.Ui.Error(fmt.Sprintf("No job(s) with prefix or id %q found", jobID))
return 1
}
if len(jobs) > 1 && strings.TrimSpace(jobID) != jobs[0].ID {
out := make([]string, len(jobs)+1)
out[0] = "ID|Type|Priority|Status"
for i, job := range jobs {
out[i+1] = fmt.Sprintf("%s|%s|%d|%s",
job.ID,
job.Type,
job.Priority,
job.Status)
}
c.Ui.Output(fmt.Sprintf("Prefix matched multiple jobs\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single job
job, _, err := client.Jobs().Info(jobs[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error inspecting job: %s", err))
return 1
}
// If output format is specified, format and output the data
var format string
if ojson {
format = "json"
} else if len(tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
f, err := DataFormat(format, tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
// Print the contents of the job
req := api.RegisterJobRequest{Job: job}
buf, err := json.MarshalIndent(req, "", " ")
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 1
}
c.Ui.Output(string(buf))
return 0
}


@ -1,270 +0,0 @@
package command
import (
"fmt"
"io"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/hashicorp/nomad/api"
)
type LogsCommand struct {
Meta
}
func (l *LogsCommand) Help() string {
helpText := `
Usage: nomad logs [options] <alloc-id> <task>
Streams the stdout/stderr of the given allocation and task.
General Options:
` + generalOptionsUsage() + `
Logs Specific Options:
-stderr:
Display stderr logs.
-verbose
Show full information.
-job <job-id>
Use a random allocation from the specified job ID.
-f
Causes the output to not stop when the end of the logs is reached, but
rather to wait for additional output.
-tail
Show the log contents with offsets relative to the end of the logs. If no
offset is given, -n is defaulted to 10.
-n
Sets the tail location as a best-effort number of lines relative to the end
of the logs.
-c
Sets the tail location in number of bytes relative to the end of the logs.
`
return strings.TrimSpace(helpText)
}
func (l *LogsCommand) Synopsis() string {
return "Streams the logs of a task."
}
func (l *LogsCommand) Run(args []string) int {
var verbose, job, tail, stderr, follow bool
var numLines, numBytes int64
flags := l.Meta.FlagSet("logs", FlagSetClient)
flags.Usage = func() { l.Ui.Output(l.Help()) }
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&job, "job", false, "")
flags.BoolVar(&tail, "tail", false, "")
flags.BoolVar(&follow, "f", false, "")
flags.BoolVar(&stderr, "stderr", false, "")
flags.Int64Var(&numLines, "n", -1, "")
flags.Int64Var(&numBytes, "c", -1, "")
if err := flags.Parse(args); err != nil {
return 1
}
args = flags.Args()
if numArgs := len(args); numArgs < 1 {
if job {
l.Ui.Error("Job ID required. See help:\n")
} else {
l.Ui.Error("Allocation ID required. See help:\n")
}
l.Ui.Error(l.Help())
return 1
} else if numArgs > 2 {
l.Ui.Error(l.Help())
return 1
}
client, err := l.Meta.Client()
if err != nil {
l.Ui.Error(fmt.Sprintf("Error initializing client: %v", err))
return 1
}
// If -job is specified, use random allocation, otherwise use provided allocation
allocID := args[0]
if job {
allocID, err = getRandomJobAlloc(client, args[0])
if err != nil {
l.Ui.Error(fmt.Sprintf("Error fetching allocations: %v", err))
return 1
}
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Query the allocation info
if len(allocID) == 1 {
l.Ui.Error(fmt.Sprintf("Alloc ID must contain at least two characters."))
return 1
}
if len(allocID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
allocID = allocID[:len(allocID)-1]
}
allocs, _, err := client.Allocations().PrefixList(allocID)
if err != nil {
l.Ui.Error(fmt.Sprintf("Error querying allocation: %v", err))
return 1
}
if len(allocs) == 0 {
l.Ui.Error(fmt.Sprintf("No allocation(s) with prefix or id %q found", allocID))
return 1
}
if len(allocs) > 1 {
// Format the allocs
out := make([]string, len(allocs)+1)
out[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
for i, alloc := range allocs {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
limit(alloc.ID, length),
limit(alloc.EvalID, length),
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus,
)
}
l.Ui.Output(fmt.Sprintf("Prefix matched multiple allocations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single allocation
alloc, _, err := client.Allocations().Info(allocs[0].ID, nil)
if err != nil {
l.Ui.Error(fmt.Sprintf("Error querying allocation: %s", err))
return 1
}
var task string
if len(args) >= 2 {
task = args[1]
if task == "" {
l.Ui.Error("Task name required")
return 1
}
} else {
// Try to determine the task name from the allocation
var tasks []*api.Task
for _, tg := range alloc.Job.TaskGroups {
if tg.Name == alloc.TaskGroup {
if len(tg.Tasks) == 1 {
task = tg.Tasks[0].Name
break
}
tasks = tg.Tasks
break
}
}
if task == "" {
l.Ui.Error(fmt.Sprintf("Allocation %q is running the following tasks:", limit(alloc.ID, length)))
for _, t := range tasks {
l.Ui.Error(fmt.Sprintf(" * %s", t.Name))
}
l.Ui.Error("\nPlease specify the task.")
return 1
}
}
logType := "stdout"
if stderr {
logType = "stderr"
}
// We have a file, output it.
var r io.ReadCloser
var readErr error
if !tail {
r, readErr = l.followFile(client, alloc, follow, task, logType, api.OriginStart, 0)
if readErr != nil {
readErr = fmt.Errorf("Error reading file: %v", readErr)
}
} else {
// Parse the offset
var offset int64 = defaultTailLines * bytesToLines
if nLines, nBytes := numLines != -1, numBytes != -1; nLines && nBytes {
l.Ui.Error("Both -n and -c set")
return 1
} else if nLines {
offset = numLines * bytesToLines
} else if nBytes {
offset = numBytes
} else {
numLines = defaultTailLines
}
r, readErr = l.followFile(client, alloc, follow, task, logType, api.OriginEnd, offset)
// If numLines is set, wrap the reader
if numLines != -1 {
r = NewLineLimitReader(r, int(numLines), int(numLines*bytesToLines), 1*time.Second)
}
if readErr != nil {
readErr = fmt.Errorf("Error tailing file: %v", readErr)
}
}
if readErr != nil {
l.Ui.Error(readErr.Error())
return 1
}
defer r.Close()
io.Copy(os.Stdout, r)
return 0
}
// followFile returns a reader that streams the task's log contents relative to
// the given origin and offset.
func (l *LogsCommand) followFile(client *api.Client, alloc *api.Allocation,
follow bool, task, logType, origin string, offset int64) (io.ReadCloser, error) {
cancel := make(chan struct{})
frames, err := client.AllocFS().Logs(alloc, follow, task, logType, origin, offset, cancel, nil)
if err != nil {
return nil, err
}
signalCh := make(chan os.Signal, 1)
signal.Notify(signalCh, os.Interrupt, syscall.SIGTERM)
// Create a reader
var r io.ReadCloser
frameReader := api.NewFrameReader(frames, cancel)
frameReader.SetUnblockTime(500 * time.Millisecond)
r = frameReader
go func() {
<-signalCh
// End the streaming
r.Close()
}()
return r, nil
}


@ -1,126 +0,0 @@
package command
import (
"bufio"
"flag"
"io"
"os"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/mitchellh/cli"
"github.com/mitchellh/colorstring"
)
const (
// Names of environment variables used to supply various
// config options to the Nomad CLI.
EnvNomadAddress = "NOMAD_ADDR"
EnvNomadRegion = "NOMAD_REGION"
// Constants for CLI identifier length
shortId = 8
fullId = 36
)
// FlagSetFlags is an enum to define what flags are present in the
// default FlagSet returned by Meta.FlagSet.
type FlagSetFlags uint
const (
FlagSetNone FlagSetFlags = 0
FlagSetClient FlagSetFlags = 1 << iota
FlagSetDefault = FlagSetClient
)
// Meta contains the meta-options and functionality that nearly every
// Nomad command inherits.
type Meta struct {
Ui cli.Ui
// These are set by the command line flags.
flagAddress string
// Whether to not-colorize output
noColor bool
// The region to send API requests
region string
}
// FlagSet returns a FlagSet with the common flags that every
// command implements. The exact behavior of FlagSet can be configured
// using the flags as the second parameter, for example to disable
// server settings on the commands that don't talk to a server.
func (m *Meta) FlagSet(n string, fs FlagSetFlags) *flag.FlagSet {
f := flag.NewFlagSet(n, flag.ContinueOnError)
// FlagSetClient is used to enable the settings for specifying
// client connectivity options.
if fs&FlagSetClient != 0 {
f.StringVar(&m.flagAddress, "address", "", "")
f.StringVar(&m.region, "region", "", "")
f.BoolVar(&m.noColor, "no-color", false, "")
}
// Create an io.Writer that writes to our UI properly for errors.
// This is kind of a hack, but it does the job. Basically: create
// a pipe, use a scanner to break it into lines, and output each line
// to the UI. Do this forever.
errR, errW := io.Pipe()
errScanner := bufio.NewScanner(errR)
go func() {
for errScanner.Scan() {
m.Ui.Error(errScanner.Text())
}
}()
f.SetOutput(errW)
return f
}
// Client is used to initialize and return a new API client using
// the default command line arguments and env vars.
func (m *Meta) Client() (*api.Client, error) {
config := api.DefaultConfig()
if v := os.Getenv(EnvNomadAddress); v != "" {
config.Address = v
}
if m.flagAddress != "" {
config.Address = m.flagAddress
}
if v := os.Getenv(EnvNomadRegion); v != "" {
config.Region = v
}
if m.region != "" {
config.Region = m.region
}
return api.NewClient(config)
}
func (m *Meta) Colorize() *colorstring.Colorize {
return &colorstring.Colorize{
Colors: colorstring.DefaultColors,
Disable: m.noColor,
Reset: true,
}
}
// generalOptionsUsage returns the help string for the global options.
func generalOptionsUsage() string {
helpText := `
-address=<addr>
The address of the Nomad server.
Overrides the NOMAD_ADDR environment variable if set.
Default = http://127.0.0.1:4646
-region=<region>
The region of the Nomad servers to forward commands to.
Overrides the NOMAD_REGION environment variable if set.
Defaults to the Agent's local region.
-no-color
Disables colored command output.
`
return strings.TrimSpace(helpText)
}
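A sketch of how a command wires Meta together outside of the CLI plumbing, assuming the command package plus an os import; the address value is illustrative and shows that parsed flags take precedence over NOMAD_ADDR and NOMAD_REGION:

func exampleClient() (*api.Client, error) {
    m := &Meta{Ui: &cli.BasicUi{Writer: os.Stdout, ErrorWriter: os.Stderr}}

    // FlagSet installs -address, -region and -no-color; parsing them populates
    // the Meta fields read by Client().
    flags := m.FlagSet("example", FlagSetClient)
    if err := flags.Parse([]string{"-address=http://127.0.0.1:4646"}); err != nil {
        return nil, err
    }
    return m.Client()
}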


@ -1,382 +0,0 @@
package command
import (
"fmt"
"strings"
"sync"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/mitchellh/cli"
)
const (
// updateWait is the amount of time to wait between status
// updates. Because the monitor is poll-based, we use this
// delay to avoid overwhelming the API server.
updateWait = time.Second
)
// evalState is used to store the current "state of the world"
// in the context of monitoring an evaluation.
type evalState struct {
status string
desc string
node string
job string
allocs map[string]*allocState
wait time.Duration
index uint64
}
// newEvalState creates and initializes a new evalState
func newEvalState() *evalState {
return &evalState{
status: structs.EvalStatusPending,
allocs: make(map[string]*allocState),
}
}
// allocState is used to track the state of an allocation
type allocState struct {
id string
group string
node string
desired string
desiredDesc string
client string
clientDesc string
index uint64
// full is the allocation struct with full details. This
// must be queried for explicitly so it is only included
// if there is important error information inside.
full *api.Allocation
}
// monitor wraps an evaluation monitor and holds metadata and
// state information.
type monitor struct {
ui cli.Ui
client *api.Client
state *evalState
// length determines the number of characters for identifiers in the ui.
length int
sync.Mutex
}
// newMonitor returns a new monitor. The returned monitor will
// write output information to the provided ui. The length parameter determines
// the number of characters for identifiers in the ui.
func newMonitor(ui cli.Ui, client *api.Client, length int) *monitor {
mon := &monitor{
ui: &cli.PrefixedUi{
InfoPrefix: "==> ",
OutputPrefix: " ",
ErrorPrefix: "==> ",
Ui: ui,
},
client: client,
state: newEvalState(),
length: length,
}
return mon
}
// update is used to update our monitor with new state. It can be
// called whether the passed information is new or not, and will
// only dump update messages when state changes.
func (m *monitor) update(update *evalState) {
m.Lock()
defer m.Unlock()
existing := m.state
// Swap in the new state at the end
defer func() {
m.state = update
}()
// Check if the evaluation was triggered by a node
if existing.node == "" && update.node != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by node %q",
limit(update.node, m.length)))
}
// Check if the evaluation was triggered by a job
if existing.job == "" && update.job != "" {
m.ui.Output(fmt.Sprintf("Evaluation triggered by job %q", update.job))
}
// Check the allocations
for allocID, alloc := range update.allocs {
if existing, ok := existing.allocs[allocID]; !ok {
switch {
case alloc.index < update.index:
// New alloc with create index lower than the eval
// create index indicates modification
m.ui.Output(fmt.Sprintf(
"Allocation %q modified: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
case alloc.desired == structs.AllocDesiredStatusRun:
// New allocation with desired status running
m.ui.Output(fmt.Sprintf(
"Allocation %q created: node %q, group %q",
limit(alloc.id, m.length), limit(alloc.node, m.length), alloc.group))
}
} else {
switch {
case existing.client != alloc.client:
description := ""
if alloc.clientDesc != "" {
description = fmt.Sprintf(" (%s)", alloc.clientDesc)
}
// Allocation status has changed
m.ui.Output(fmt.Sprintf(
"Allocation %q status changed: %q -> %q%s",
limit(alloc.id, m.length), existing.client, alloc.client, description))
}
}
}
// Check if the status changed. We skip any transitions to pending status.
if existing.status != "" &&
update.status != structs.AllocClientStatusPending &&
existing.status != update.status {
m.ui.Output(fmt.Sprintf("Evaluation status changed: %q -> %q",
existing.status, update.status))
}
}
// monitor is used to start monitoring the given evaluation ID. It
// writes output directly to the monitor's ui, and returns the
// exit code for the command. If allowPrefix is false, monitor will only accept
// exact matching evalIDs.
//
// The return code will be 0 on successful evaluation. If there are
// problems scheduling the job (impossible constraints, resources
// exhausted, etc), then the return code will be 2. For any other
// failures (API connectivity, internal errors, etc), the return code
// will be 1.
func (m *monitor) monitor(evalID string, allowPrefix bool) int {
// Track if we encounter a scheduling failure. This can only be
// detected while querying allocations, so we use this bool to
// carry that status into the return code.
var schedFailure bool
// The user may have specified a prefix as eval id. We need to look up the
// full id from the database first. Since we do this in a loop we need a
// variable to keep track if we've already written the header message.
var headerWritten bool
// Add the initial pending state
m.update(newEvalState())
for {
// Query the evaluation
eval, _, err := m.client.Evaluations().Info(evalID, nil)
if err != nil {
if !allowPrefix {
m.ui.Error(fmt.Sprintf("No evaluation with id %q found", evalID))
return 1
}
if len(evalID) == 1 {
m.ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(evalID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
evalID = evalID[:len(evalID)-1]
}
evals, _, err := m.client.Evaluations().PrefixList(evalID)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
}
if len(evals) == 0 {
m.ui.Error(fmt.Sprintf("No evaluation(s) with prefix or id %q found", evalID))
return 1
}
if len(evals) > 1 {
// Format the evaluations
out := make([]string, len(evals)+1)
out[0] = "ID|Priority|Type|Triggered By|Status"
for i, eval := range evals {
out[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, m.length),
eval.Priority,
eval.Type,
eval.TriggeredBy,
eval.Status)
}
m.ui.Output(fmt.Sprintf("Prefix matched multiple evaluations\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single evaluation
eval, _, err = m.client.Evaluations().Info(evals[0].ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading evaluation: %s", err))
return 1
}
}
if !headerWritten {
m.ui.Info(fmt.Sprintf("Monitoring evaluation %q", limit(eval.ID, m.length)))
headerWritten = true
}
// Create the new eval state.
state := newEvalState()
state.status = eval.Status
state.desc = eval.StatusDescription
state.node = eval.NodeID
state.job = eval.JobID
state.wait = eval.Wait
state.index = eval.CreateIndex
// Query the allocations associated with the evaluation
allocs, _, err := m.client.Evaluations().Allocations(eval.ID, nil)
if err != nil {
m.ui.Error(fmt.Sprintf("Error reading allocations: %s", err))
return 1
}
// Add the allocs to the state
for _, alloc := range allocs {
state.allocs[alloc.ID] = &allocState{
id: alloc.ID,
group: alloc.TaskGroup,
node: alloc.NodeID,
desired: alloc.DesiredStatus,
desiredDesc: alloc.DesiredDescription,
client: alloc.ClientStatus,
clientDesc: alloc.ClientDescription,
index: alloc.CreateIndex,
}
}
// Update the state
m.update(state)
switch eval.Status {
case structs.EvalStatusComplete, structs.EvalStatusFailed, structs.EvalStatusCancelled:
if len(eval.FailedTGAllocs) == 0 {
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q",
limit(eval.ID, m.length), eval.Status))
} else {
// There were failures making the allocations
schedFailure = true
m.ui.Info(fmt.Sprintf("Evaluation %q finished with status %q but failed to place all allocations:",
limit(eval.ID, m.length), eval.Status))
// Print the failures per task group
for tg, metrics := range eval.FailedTGAllocs {
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
m.ui.Output(fmt.Sprintf("Task Group %q (failed to place %d %s):", tg, metrics.CoalescedFailures+1, noun))
metrics := formatAllocMetrics(metrics, false, " ")
for _, line := range strings.Split(metrics, "\n") {
m.ui.Output(line)
}
}
if eval.BlockedEval != "" {
m.ui.Output(fmt.Sprintf("Evaluation %q waiting for additional capacity to place remainder",
limit(eval.BlockedEval, m.length)))
}
}
default:
// Wait for the next update
time.Sleep(updateWait)
continue
}
// Monitor the next eval in the chain, if present
if eval.NextEval != "" {
if eval.Wait.Nanoseconds() != 0 {
m.ui.Info(fmt.Sprintf(
"Monitoring next evaluation %q in %s",
limit(eval.NextEval, m.length), eval.Wait))
// Skip some unnecessary polling
time.Sleep(eval.Wait)
}
// Reset the state and monitor the new eval
m.state = newEvalState()
return m.monitor(eval.NextEval, allowPrefix)
}
break
}
// Treat scheduling failures specially using a dedicated exit code.
// This makes it easier to detect failures from the CLI.
if schedFailure {
return 2
}
return 0
}
// dumpAllocStatus is a helper to generate a more user-friendly error message
// for scheduling failures, displaying a high-level status of why the job
// could not be scheduled.
func dumpAllocStatus(ui cli.Ui, alloc *api.Allocation, length int) {
// Print filter stats
ui.Output(fmt.Sprintf("Allocation %q status %q (%d/%d nodes filtered)",
limit(alloc.ID, length), alloc.ClientStatus,
alloc.Metrics.NodesFiltered, alloc.Metrics.NodesEvaluated))
ui.Output(formatAllocMetrics(alloc.Metrics, true, " "))
}
func formatAllocMetrics(metrics *api.AllocationMetric, scores bool, prefix string) string {
// Print a helpful message if we have an eligibility problem
var out string
if metrics.NodesEvaluated == 0 {
out += fmt.Sprintf("%s* No nodes were eligible for evaluation\n", prefix)
}
// Print a helpful message if the user has asked for a DC that has no
// available nodes.
for dc, available := range metrics.NodesAvailable {
if available == 0 {
out += fmt.Sprintf("%s* No nodes are available in datacenter %q\n", prefix, dc)
}
}
// Print filter info
for class, num := range metrics.ClassFiltered {
out += fmt.Sprintf("%s* Class %q filtered %d nodes\n", prefix, class, num)
}
for cs, num := range metrics.ConstraintFiltered {
out += fmt.Sprintf("%s* Constraint %q filtered %d nodes\n", prefix, cs, num)
}
// Print exhaustion info
if ne := metrics.NodesExhausted; ne > 0 {
out += fmt.Sprintf("%s* Resources exhausted on %d nodes\n", prefix, ne)
}
for class, num := range metrics.ClassExhausted {
out += fmt.Sprintf("%s* Class %q exhausted on %d nodes\n", prefix, class, num)
}
for dim, num := range metrics.DimensionExhausted {
out += fmt.Sprintf("%s* Dimension %q exhausted on %d nodes\n", prefix, dim, num)
}
// Print scores
if scores {
for name, score := range metrics.Scores {
out += fmt.Sprintf("%s* Score %q = %f\n", prefix, name, score)
}
}
out = strings.TrimSuffix(out, "\n")
return out
}
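A sketch of how callers drive the monitor and interpret its exit codes (0 on success, 2 on placement failures, 1 on other errors), assuming the command package and an evaluation ID obtained from a prior registration; the function name is illustrative:

func exampleMonitor(ui cli.Ui, client *api.Client, evalID string) int {
    mon := newMonitor(ui, client, shortId)

    // allowPrefix=true lets a truncated evaluation ID still resolve.
    switch code := mon.monitor(evalID, true); code {
    case 0:
        ui.Output("evaluation complete and all allocations placed")
        return 0
    case 2:
        ui.Error("evaluation complete but some allocations failed to place")
        return 2
    default:
        ui.Error("error while monitoring evaluation")
        return code
    }
}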


@ -1,171 +0,0 @@
package command
import (
"fmt"
"strings"
)
type NodeDrainCommand struct {
Meta
}
func (c *NodeDrainCommand) Help() string {
helpText := `
Usage: nomad node-drain [options] <node>
Toggles node draining on a specified node. It is required
that either -enable or -disable is specified, but not both.
The -self flag is useful to drain the local node.
General Options:
` + generalOptionsUsage() + `
Node Drain Options:
-disable
Disable draining for the specified node.
-enable
Enable draining for the specified node.
-self
Set the drain status of the local node.
-yes
Automatic yes to prompts.
`
return strings.TrimSpace(helpText)
}
func (c *NodeDrainCommand) Synopsis() string {
return "Toggle drain mode on a given node"
}
func (c *NodeDrainCommand) Run(args []string) int {
var enable, disable, self, autoYes bool
flags := c.Meta.FlagSet("node-drain", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&enable, "enable", false, "Enable drain mode")
flags.BoolVar(&disable, "disable", false, "Disable drain mode")
flags.BoolVar(&self, "self", false, "")
flags.BoolVar(&autoYes, "yes", false, "Automatic yes to prompts.")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got either enable or disable, but not both.
if (enable && disable) || (!enable && !disable) {
c.Ui.Error(c.Help())
return 1
}
// Check that we got a node ID
args = flags.Args()
if l := len(args); self && l != 0 || !self && l != 1 {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// If -self flag is set then determine the current node.
nodeID := ""
if !self {
nodeID = args[0]
} else {
var err error
if nodeID, err = getLocalNodeID(client); err != nil {
c.Ui.Error(err.Error())
return 1
}
}
// Check if node exists
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(nodeID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
nodeID = nodeID[:len(nodeID)-1]
}
nodes, _, err := client.Nodes().PrefixList(nodeID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error toggling drain mode: %s", err))
return 1
}
// Return error if no nodes are found
if len(nodes) == 0 {
c.Ui.Error(fmt.Sprintf("No node(s) with prefix or id %q found", nodeID))
return 1
}
if len(nodes) > 1 {
// Format the nodes list that matches the prefix so that the user
// can create a more specific request
out := make([]string, len(nodes)+1)
out[0] = "ID|Datacenter|Name|Class|Drain|Status"
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s",
node.ID,
node.Datacenter,
node.Name,
node.NodeClass,
node.Drain,
node.Status)
}
// Dump the output
c.Ui.Output(fmt.Sprintf("Prefix matched multiple nodes\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single node
node, _, err := client.Nodes().Info(nodes[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error toggling drain mode: %s", err))
return 1
}
// Confirm drain if the node was a prefix match.
if nodeID != node.ID && !autoYes {
verb := "enable"
if disable {
verb = "disable"
}
question := fmt.Sprintf("Are you sure you want to %s drain mode for node %q? [y/N]", verb, node.ID)
answer, err := c.Ui.Ask(question)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse answer: %v", err))
return 1
}
if answer == "" || strings.ToLower(answer)[0] == 'n' {
// No case
c.Ui.Output("Canceling drain toggle")
return 0
} else if strings.ToLower(answer)[0] == 'y' && len(answer) > 1 {
// Non exact match yes
c.Ui.Output("For confirmation, an exact y is required.")
return 0
} else if answer != "y" {
c.Ui.Output("No confirmation detected. For confirmation, an exact 'y' is required.")
return 1
}
}
// Toggle node draining
if _, err := client.Nodes().ToggleDrain(node.ID, enable, nil); err != nil {
c.Ui.Error(fmt.Sprintf("Error toggling drain mode: %s", err))
return 1
}
return 0
}
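The drain toggle itself is a single API call; a minimal sketch against the Go api package, assuming the full node ID has already been resolved (the function name is illustrative):

func exampleToggleDrain(client *api.Client, nodeID string, enable bool) error {
    // enable=true marks the node as draining; false clears it.
    if _, err := client.Nodes().ToggleDrain(nodeID, enable, nil); err != nil {
        return fmt.Errorf("error toggling drain mode for node %q: %v", nodeID, err)
    }
    return nil
}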


@ -1,579 +0,0 @@
package command
import (
"fmt"
"math"
"sort"
"strings"
"time"
"github.com/dustin/go-humanize"
"github.com/mitchellh/colorstring"
"github.com/hashicorp/nomad/api"
)
const (
// floatFormat is a format string for formatting floats.
floatFormat = "#,###.##"
// bytesPerMegabyte is the number of bytes per MB
bytesPerMegabyte = 1024 * 1024
)
type NodeStatusCommand struct {
Meta
color *colorstring.Colorize
length int
short bool
verbose bool
listAllocs bool
self bool
stats bool
json bool
tmpl string
}
func (c *NodeStatusCommand) Help() string {
helpText := `
Usage: nomad node-status [options] <node>
Display status information about a given node. The list of nodes
returned includes only nodes to which jobs may be scheduled, and
includes status and other high-level information.
If a node ID is passed, information for that specific node will be displayed,
including resource usage statistics. If no node IDs are passed, then a
short-hand list of all nodes will be displayed. The -self flag is useful to
quickly access the status of the local node.
General Options:
` + generalOptionsUsage() + `
Node Status Options:
-self
Query the status of the local node.
-stats
Display detailed resource usage statistics.
-allocs
Display a count of running allocations for each node.
-short
Display short output. Used only when a single node is being
queried, and drops verbose output about node allocations.
-verbose
Display full information.
-json
Output the node in its JSON format.
-t
Format and display node using a Go template.
`
return strings.TrimSpace(helpText)
}
func (c *NodeStatusCommand) Synopsis() string {
return "Display status information about nodes"
}
func (c *NodeStatusCommand) Run(args []string) int {
flags := c.Meta.FlagSet("node-status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&c.short, "short", false, "")
flags.BoolVar(&c.verbose, "verbose", false, "")
flags.BoolVar(&c.list_allocs, "allocs", false, "")
flags.BoolVar(&c.self, "self", false, "")
flags.BoolVar(&c.stats, "stats", false, "")
flags.BoolVar(&c.json, "json", false, "")
flags.StringVar(&c.tmpl, "t", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got either a single node or none
args = flags.Args()
if len(args) > 1 {
c.Ui.Error(c.Help())
return 1
}
// Truncate the id unless full length is requested
c.length = shortId
if c.verbose {
c.length = fullId
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Use list mode if no node name was provided
if len(args) == 0 && !c.self {
// If output format is specified, format and output the node data list
var format string
if c.json && len(c.tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if c.json {
format = "json"
} else if len(c.tmpl) > 0 {
format = "template"
}
// Query the node info
nodes, _, err := client.Nodes().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node status: %s", err))
return 1
}
// Return nothing if no nodes found
if len(nodes) == 0 {
return 0
}
if len(format) > 0 {
f, err := DataFormat(format, c.tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(nodes)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
// Format the nodes list
out := make([]string, len(nodes)+1)
if c.listAllocs {
out[0] = "ID|DC|Name|Class|Drain|Status|Running Allocs"
} else {
out[0] = "ID|DC|Name|Class|Drain|Status"
}
for i, node := range nodes {
if c.listAllocs {
numAllocs, err := getRunningAllocs(client, node.ID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
return 1
}
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s|%v",
limit(node.ID, c.length),
node.Datacenter,
node.Name,
node.NodeClass,
node.Drain,
node.Status,
len(numAllocs))
} else {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s",
limit(node.ID, c.length),
node.Datacenter,
node.Name,
node.NodeClass,
node.Drain,
node.Status)
}
}
// Dump the output
c.Ui.Output(formatList(out))
return 0
}
// Query the specific node
nodeID := ""
if !c.self {
nodeID = args[0]
} else {
var err error
if nodeID, err = getLocalNodeID(client); err != nil {
c.Ui.Error(err.Error())
return 1
}
}
if len(nodeID) == 1 {
c.Ui.Error(fmt.Sprintf("Identifier must contain at least two characters."))
return 1
}
if len(nodeID)%2 == 1 {
// Identifiers must be of even length, so we strip off the last byte
// to provide a consistent user experience.
nodeID = nodeID[:len(nodeID)-1]
}
nodes, _, err := client.Nodes().PrefixList(nodeID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
return 1
}
// Return error if no nodes are found
if len(nodes) == 0 {
c.Ui.Error(fmt.Sprintf("No node(s) with prefix %q found", nodeID))
return 1
}
if len(nodes) > 1 {
// Format the nodes list that matches the prefix so that the user
// can create a more specific request
out := make([]string, len(nodes)+1)
out[0] = "ID|DC|Name|Class|Drain|Status"
for i, node := range nodes {
out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%v|%s",
limit(node.ID, c.length),
node.Datacenter,
node.Name,
node.NodeClass,
node.Drain,
node.Status)
}
// Dump the output
c.Ui.Output(fmt.Sprintf("Prefix matched multiple nodes\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single node
node, _, err := client.Nodes().Info(nodes[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node info: %s", err))
return 1
}
// If output format is specified, format and output the data
var format string
if c.json && len(c.tmpl) > 0 {
c.Ui.Error("Both -json and -t are not allowed")
return 1
} else if c.json {
format = "json"
} else if len(c.tmpl) > 0 {
format = "template"
}
if len(format) > 0 {
f, err := DataFormat(format, c.tmpl)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting formatter: %s", err))
return 1
}
out, err := f.TransformData(node)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error formatting the data: %s", err))
return 1
}
c.Ui.Output(out)
return 0
}
return c.formatNode(client, node)
}
func (c *NodeStatusCommand) formatNode(client *api.Client, node *api.Node) int {
// Get the host stats
hostStats, nodeStatsErr := client.Nodes().Stats(node.ID, nil)
if nodeStatsErr != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats (HINT: ensure Client.Advertise.HTTP is set): %v", nodeStatsErr))
}
// Format the header output
basic := []string{
fmt.Sprintf("ID|%s", limit(node.ID, c.length)),
fmt.Sprintf("Name|%s", node.Name),
fmt.Sprintf("Class|%s", node.NodeClass),
fmt.Sprintf("DC|%s", node.Datacenter),
fmt.Sprintf("Drain|%v", node.Drain),
fmt.Sprintf("Status|%s", node.Status),
}
if hostStats != nil {
uptime := time.Duration(hostStats.Uptime * uint64(time.Second))
basic = append(basic, fmt.Sprintf("Uptime|%s", uptime.String()))
}
c.Ui.Output(c.Colorize().Color(formatKV(basic)))
if !c.short {
// Get list of running allocations on the node
runningAllocs, err := getRunningAllocs(client, node.ID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node for running allocations: %s", err))
return 1
}
allocatedResources := getAllocatedResources(client, runningAllocs, node)
c.Ui.Output(c.Colorize().Color("\n[bold]Allocated Resources[reset]"))
c.Ui.Output(formatList(allocatedResources))
actualResources, err := getActualResources(client, runningAllocs, node)
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Allocation Resource Utilization[reset]"))
c.Ui.Output(formatList(actualResources))
}
hostResources, err := getHostResources(hostStats, node)
if err != nil {
c.Ui.Output("")
c.Ui.Error(fmt.Sprintf("error fetching node stats (HINT: ensure Client.Advertise.HTTP is set): %v", err))
}
if err == nil {
c.Ui.Output(c.Colorize().Color("\n[bold]Host Resource Utilization[reset]"))
c.Ui.Output(formatList(hostResources))
}
if hostStats != nil && c.stats {
c.Ui.Output(c.Colorize().Color("\n[bold]CPU Stats[reset]"))
c.printCpuStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Memory Stats[reset]"))
c.printMemoryStats(hostStats)
c.Ui.Output(c.Colorize().Color("\n[bold]Disk Stats[reset]"))
c.printDiskStats(hostStats)
}
}
allocs, err := getAllocs(client, node, c.length)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying node allocations: %s", err))
return 1
}
if len(allocs) > 1 {
c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
c.Ui.Output(formatList(allocs))
}
if c.verbose {
c.formatAttributes(node)
}
return 0
}
func (c *NodeStatusCommand) formatAttributes(node *api.Node) {
// Print the attributes
keys := make([]string, 0, len(node.Attributes))
for k := range node.Attributes {
keys = append(keys, k)
}
sort.Strings(keys)
var attributes []string
for _, k := range keys {
if k != "" {
attributes = append(attributes, fmt.Sprintf("%s|%s", k, node.Attributes[k]))
}
}
c.Ui.Output(c.Colorize().Color("\n[bold]Attributes[reset]"))
c.Ui.Output(formatKV(attributes))
}
func (c *NodeStatusCommand) printCpuStats(hostStats *api.HostStats) {
l := len(hostStats.CPU)
for i, cpuStat := range hostStats.CPU {
cpuStatsAttr := make([]string, 4)
cpuStatsAttr[0] = fmt.Sprintf("CPU|%v", cpuStat.CPU)
cpuStatsAttr[1] = fmt.Sprintf("User|%v%%", humanize.FormatFloat(floatFormat, cpuStat.User))
cpuStatsAttr[2] = fmt.Sprintf("System|%v%%", humanize.FormatFloat(floatFormat, cpuStat.System))
cpuStatsAttr[3] = fmt.Sprintf("Idle|%v%%", humanize.FormatFloat(floatFormat, cpuStat.Idle))
c.Ui.Output(formatKV(cpuStatsAttr))
if i+1 < l {
c.Ui.Output("")
}
}
}
func (c *NodeStatusCommand) printMemoryStats(hostStats *api.HostStats) {
memoryStat := hostStats.Memory
memStatsAttr := make([]string, 4)
memStatsAttr[0] = fmt.Sprintf("Total|%v", humanize.IBytes(memoryStat.Total))
memStatsAttr[1] = fmt.Sprintf("Available|%v", humanize.IBytes(memoryStat.Available))
memStatsAttr[2] = fmt.Sprintf("Used|%v", humanize.IBytes(memoryStat.Used))
memStatsAttr[3] = fmt.Sprintf("Free|%v", humanize.IBytes(memoryStat.Free))
c.Ui.Output(formatKV(memStatsAttr))
}
func (c *NodeStatusCommand) printDiskStats(hostStats *api.HostStats) {
l := len(hostStats.DiskStats)
for i, diskStat := range hostStats.DiskStats {
diskStatsAttr := make([]string, 7)
diskStatsAttr[0] = fmt.Sprintf("Device|%s", diskStat.Device)
diskStatsAttr[1] = fmt.Sprintf("MountPoint|%s", diskStat.Mountpoint)
diskStatsAttr[2] = fmt.Sprintf("Size|%s", humanize.IBytes(diskStat.Size))
diskStatsAttr[3] = fmt.Sprintf("Used|%s", humanize.IBytes(diskStat.Used))
diskStatsAttr[4] = fmt.Sprintf("Available|%s", humanize.IBytes(diskStat.Available))
diskStatsAttr[5] = fmt.Sprintf("Used Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.UsedPercent))
diskStatsAttr[6] = fmt.Sprintf("Inodes Percent|%v%%", humanize.FormatFloat(floatFormat, diskStat.InodesUsedPercent))
c.Ui.Output(formatKV(diskStatsAttr))
if i+1 < l {
c.Ui.Output("")
}
}
}
// getRunningAllocs returns the allocations currently running on the node
func getRunningAllocs(client *api.Client, nodeID string) ([]*api.Allocation, error) {
var allocs []*api.Allocation
// Query the node allocations
nodeAllocs, _, err := client.Nodes().Allocations(nodeID, nil)
// Filter list to only running allocations
for _, alloc := range nodeAllocs {
if alloc.ClientStatus == "running" {
allocs = append(allocs, alloc)
}
}
return allocs, err
}
// getAllocs returns information about every running allocation on the node
func getAllocs(client *api.Client, node *api.Node, length int) ([]string, error) {
var allocs []string
// Query the node allocations
nodeAllocs, _, err := client.Nodes().Allocations(node.ID, nil)
// Format the allocations
allocs = make([]string, len(nodeAllocs)+1)
allocs[0] = "ID|Eval ID|Job ID|Task Group|Desired Status|Client Status"
for i, alloc := range nodeAllocs {
allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s",
limit(alloc.ID, length),
limit(alloc.EvalID, length),
alloc.JobID,
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus)
}
return allocs, err
}
// getAllocatedResources returns the resources allocated on the node compared
// to its total allocatable resources.
func getAllocatedResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) []string {
// Compute the total
total := computeNodeTotalResources(node)
// Get Resources
var cpu, mem, disk, iops int
for _, alloc := range runningAllocs {
cpu += alloc.Resources.CPU
mem += alloc.Resources.MemoryMB
disk += alloc.Resources.DiskMB
iops += alloc.Resources.IOPS
}
resources := make([]string, 2)
resources[0] = "CPU|Memory|Disk|IOPS"
resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|%v/%v|%v/%v",
cpu,
total.CPU,
humanize.IBytes(uint64(mem*bytesPerMegabyte)),
humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)),
humanize.IBytes(uint64(disk*bytesPerMegabyte)),
humanize.IBytes(uint64(total.DiskMB*bytesPerMegabyte)),
iops,
total.IOPS)
return resources
}
// computeNodeTotalResources returns the total allocatable resources (resources
// minus reserved)
func computeNodeTotalResources(node *api.Node) api.Resources {
total := api.Resources{}
r := node.Resources
res := node.Reserved
if res == nil {
res = &api.Resources{}
}
total.CPU = r.CPU - res.CPU
total.MemoryMB = r.MemoryMB - res.MemoryMB
total.DiskMB = r.DiskMB - res.DiskMB
total.IOPS = r.IOPS - res.IOPS
return total
}
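// Worked example with hypothetical numbers: a node reporting 4000 MHz CPU,
// 8192 MB memory and 102400 MB disk with 500 MHz, 512 MB and 2400 MB reserved
// would yield 3500 MHz, 7680 MB and 100000 MB of allocatable capacity.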
// getActualResources returns the actual resource usage of the allocations.
func getActualResources(client *api.Client, runningAllocs []*api.Allocation, node *api.Node) ([]string, error) {
// Compute the total
total := computeNodeTotalResources(node)
// Get Resources
var cpu float64
var mem uint64
for _, alloc := range runningAllocs {
// Make the call to the client to get the actual usage.
stats, err := client.Allocations().Stats(alloc, nil)
if err != nil {
return nil, err
}
cpu += stats.ResourceUsage.CpuStats.TotalTicks
mem += stats.ResourceUsage.MemoryStats.RSS
}
resources := make([]string, 2)
resources[0] = "CPU|Memory"
resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v",
math.Floor(cpu),
total.CPU,
humanize.IBytes(mem),
humanize.IBytes(uint64(total.MemoryMB*bytesPerMegabyte)))
return resources, nil
}
// getHostResources returns the actual resource usage of the node.
func getHostResources(hostStats *api.HostStats, node *api.Node) ([]string, error) {
if hostStats == nil {
return nil, fmt.Errorf("actual resource usage not present")
}
var resources []string
// calculate disk usage
storageDevice := node.Attributes["unique.storage.volume"]
var diskUsed, diskSize uint64
var physical bool
for _, disk := range hostStats.DiskStats {
if disk.Device == storageDevice {
diskUsed = disk.Used
diskSize = disk.Size
physical = true
}
}
resources = make([]string, 2)
resources[0] = "CPU|Memory|Disk"
if physical {
resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|%v/%v",
math.Floor(hostStats.CPUTicksConsumed),
node.Resources.CPU,
humanize.IBytes(hostStats.Memory.Used),
humanize.IBytes(hostStats.Memory.Total),
humanize.IBytes(diskUsed),
humanize.IBytes(diskSize),
)
} else {
// If a non-physical device is used, output only the device name,
// since Nomad doesn't collect stats data for it.
resources[1] = fmt.Sprintf("%v/%v MHz|%v/%v|(%s)",
math.Floor(hostStats.CPUTicksConsumed),
node.Resources.CPU,
humanize.IBytes(hostStats.Memory.Used),
humanize.IBytes(hostStats.Memory.Total),
storageDevice,
)
}
return resources, nil
}

View File

@ -1,503 +0,0 @@
package command
import (
"fmt"
"sort"
"strings"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/hashicorp/nomad/scheduler"
"github.com/mitchellh/colorstring"
)
const (
jobModifyIndexHelp = `To submit the job with version verification run:
nomad run -check-index %d %s
When running the job with the check-index flag, the job will only be run if the
server-side version matches the job modify index returned. If the index has
changed, another user has modified the job and the plan's results are
potentially invalid.`
)
type PlanCommand struct {
Meta
JobGetter
color *colorstring.Colorize
}
func (c *PlanCommand) Help() string {
helpText := `
Usage: nomad plan [options] <file>
Plan invokes a dry-run of the scheduler to determine the effects of submitting
either a new or updated version of a job. The plan will not result in any
changes to the cluster but gives insight into whether the job could be run
successfully and how it would affect existing allocations.
If the supplied path is "-", the jobfile is read from stdin. Otherwise
it is read from the file at the supplied path or downloaded and
read from URL specified.
A job modify index is returned with the plan. This value can be used when
submitting the job using "nomad run -check-index", which will check that the job
was not modified between the plan and run commands before invoking the
scheduler. This ensures the job has not been modified since the plan.
A structured diff between the local and remote job is displayed to
give insight into what the scheduler will attempt to do and why.
If the job specifies a region, it overrides the -region flag and the
NOMAD_REGION environment variable, and the job's region is used.
Plan will return one of the following exit codes:
* 0: No allocations created or destroyed.
* 1: Allocations created or destroyed.
* 255: Error determining plan results.
General Options:
` + generalOptionsUsage() + `
Plan Options:
-diff
Determines whether the diff between the remote job and planned job is shown.
Defaults to true.
-verbose
Increase diff verbosity.
`
return strings.TrimSpace(helpText)
}
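// A sketch of the intended workflow, using a hypothetical example.nomad job
// file; the modify index shown is illustrative:
//
//   nomad plan example.nomad
//   # ...diff output, then "Job Modify Index: 42"...
//   nomad run -check-index 42 example.nomad
//
// The run is rejected if the job was modified on the server between the two
// commands.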
func (c *PlanCommand) Synopsis() string {
return "Dry-run a job update to determine its effects"
}
func (c *PlanCommand) Run(args []string) int {
var diff, verbose bool
flags := c.Meta.FlagSet("plan", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&diff, "diff", true, "")
flags.BoolVar(&verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 255
}
// Check that we got exactly one job
args = flags.Args()
if len(args) != 1 {
c.Ui.Error(c.Help())
return 255
}
path := args[0]
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 255
}
// Initialize any fields that need to be.
job.Canonicalize()
// Check that the job is valid
if err := job.Validate(); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %s", err))
return 255
}
// Convert it to something we can use
apiJob, err := convertStructJob(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 255
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 255
}
// Force the region to be that of the job.
if r := job.Region; r != "" {
client.SetRegion(r)
}
// Submit the job
resp, _, err := client.Jobs().Plan(apiJob, diff, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error during plan: %s", err))
return 255
}
// Print the diff if not disabled
if diff {
c.Ui.Output(fmt.Sprintf("%s\n",
c.Colorize().Color(strings.TrimSpace(formatJobDiff(resp.Diff, verbose)))))
}
// Print the scheduler dry-run output
c.Ui.Output(c.Colorize().Color("[bold]Scheduler dry-run:[reset]"))
c.Ui.Output(c.Colorize().Color(formatDryRun(resp, job)))
c.Ui.Output("")
// Print the job index info
c.Ui.Output(c.Colorize().Color(formatJobModifyIndex(resp.JobModifyIndex, path)))
return getExitCode(resp)
}
// getExitCode returns the exit code for the plan response:
// * 0: No allocations created or destroyed.
// * 1: Allocations created or destroyed.
func getExitCode(resp *api.JobPlanResponse) int {
// Check for changes
for _, d := range resp.Annotations.DesiredTGUpdates {
if d.Stop+d.Place+d.Migrate+d.DestructiveUpdate > 0 {
return 1
}
}
return 0
}
// formatJobModifyIndex produces a help string that displays the job modify
// index and how to submit a job with it.
func formatJobModifyIndex(jobModifyIndex uint64, jobName string) string {
help := fmt.Sprintf(jobModifyIndexHelp, jobModifyIndex, jobName)
out := fmt.Sprintf("[reset][bold]Job Modify Index: %d[reset]\n%s", jobModifyIndex, help)
return out
}
// formatDryRun produces a string explaining the results of the dry run.
func formatDryRun(resp *api.JobPlanResponse, job *structs.Job) string {
var rolling *api.Evaluation
for _, eval := range resp.CreatedEvals {
if eval.TriggeredBy == "rolling-update" {
rolling = eval
}
}
var out string
if len(resp.FailedTGAllocs) == 0 {
out = "[bold][green]- All tasks successfully allocated.[reset]\n"
} else {
// Change the output depending on if we are a system job or not
if job.Type == "system" {
out = "[bold][yellow]- WARNING: Failed to place allocations on all nodes.[reset]\n"
} else {
out = "[bold][yellow]- WARNING: Failed to place all allocations.[reset]\n"
}
sorted := sortedTaskGroupFromMetrics(resp.FailedTGAllocs)
for _, tg := range sorted {
metrics := resp.FailedTGAllocs[tg]
noun := "allocation"
if metrics.CoalescedFailures > 0 {
noun += "s"
}
out += fmt.Sprintf("%s[yellow]Task Group %q (failed to place %d %s):\n[reset]", strings.Repeat(" ", 2), tg, metrics.CoalescedFailures+1, noun)
out += fmt.Sprintf("[yellow]%s[reset]\n\n", formatAllocMetrics(metrics, false, strings.Repeat(" ", 4)))
}
if rolling == nil {
out = strings.TrimSuffix(out, "\n")
}
}
if rolling != nil {
out += fmt.Sprintf("[green]- Rolling update, next evaluation will be in %s.\n", rolling.Wait)
}
if next := resp.NextPeriodicLaunch; !next.IsZero() {
out += fmt.Sprintf("[green]- If submitted now, next periodic launch would be at %s (%s from now).\n",
formatTime(next), formatTimeDifference(time.Now().UTC(), next, time.Second))
}
out = strings.TrimSuffix(out, "\n")
return out
}
// formatJobDiff produces an annotated diff of the job. If verbose mode is
// set, added or deleted task groups and tasks are expanded.
func formatJobDiff(job *api.JobDiff, verbose bool) string {
marker, _ := getDiffString(job.Type)
out := fmt.Sprintf("%s[bold]Job: %q\n", marker, job.ID)
// Determine the longest markers and fields so that the output can be
// properly aligned.
longestField, longestMarker := getLongestPrefixes(job.Fields, job.Objects)
for _, tg := range job.TaskGroups {
if _, l := getDiffString(tg.Type); l > longestMarker {
longestMarker = l
}
}
// Only show the job's field and object diffs if the job is edited or
// verbose mode is set.
if job.Type == "Edited" || verbose {
fo := alignedFieldAndObjects(job.Fields, job.Objects, 0, longestField, longestMarker)
out += fo
if len(fo) > 0 {
out += "\n"
}
}
// Print the task groups
for _, tg := range job.TaskGroups {
_, mLength := getDiffString(tg.Type)
kPrefix := longestMarker - mLength
out += fmt.Sprintf("%s\n", formatTaskGroupDiff(tg, kPrefix, verbose))
}
return out
}
// formatTaskGroupDiff produces an annotated diff of a task group. If the
// verbose field is set, the task group's fields and objects are expanded even if
// the full object is an addition or removal. tgPrefix is the number of spaces to prefix
// the output of the task group.
func formatTaskGroupDiff(tg *api.TaskGroupDiff, tgPrefix int, verbose bool) string {
marker, _ := getDiffString(tg.Type)
out := fmt.Sprintf("%s%s[bold]Task Group: %q[reset]", marker, strings.Repeat(" ", tgPrefix), tg.Name)
// Append the updates and colorize them
if l := len(tg.Updates); l > 0 {
order := make([]string, 0, l)
for updateType := range tg.Updates {
order = append(order, updateType)
}
sort.Strings(order)
updates := make([]string, 0, l)
for _, updateType := range order {
count := tg.Updates[updateType]
var color string
switch updateType {
case scheduler.UpdateTypeIgnore:
case scheduler.UpdateTypeCreate:
color = "[green]"
case scheduler.UpdateTypeDestroy:
color = "[red]"
case scheduler.UpdateTypeMigrate:
color = "[blue]"
case scheduler.UpdateTypeInplaceUpdate:
color = "[cyan]"
case scheduler.UpdateTypeDestructiveUpdate:
color = "[yellow]"
}
updates = append(updates, fmt.Sprintf("[reset]%s%d %s", color, count, updateType))
}
out += fmt.Sprintf(" (%s[reset])\n", strings.Join(updates, ", "))
} else {
out += "[reset]\n"
}
// Determine the longest field and markers so the output is properly
// aligned
longestField, longestMarker := getLongestPrefixes(tg.Fields, tg.Objects)
for _, task := range tg.Tasks {
if _, l := getDiffString(task.Type); l > longestMarker {
longestMarker = l
}
}
// Only show the task group's field and object diffs if the group is edited or
// verbose mode is set.
subStartPrefix := tgPrefix + 2
if tg.Type == "Edited" || verbose {
fo := alignedFieldAndObjects(tg.Fields, tg.Objects, subStartPrefix, longestField, longestMarker)
out += fo
if len(fo) > 0 {
out += "\n"
}
}
// Output the tasks
for _, task := range tg.Tasks {
_, mLength := getDiffString(task.Type)
prefix := longestMarker - mLength
out += fmt.Sprintf("%s\n", formatTaskDiff(task, subStartPrefix, prefix, verbose))
}
return out
}
// formatTaskDiff produces an annotated diff of a task. If the verbose field is
// set, the task's fields and objects are expanded even if the full object is an
// addition or removal. startPrefix is the number of spaces to prefix the output of
// the task and taskPrefix is the number of spaces to put between the marker and
// task name output.
func formatTaskDiff(task *api.TaskDiff, startPrefix, taskPrefix int, verbose bool) string {
marker, _ := getDiffString(task.Type)
out := fmt.Sprintf("%s%s%s[bold]Task: %q",
strings.Repeat(" ", startPrefix), marker, strings.Repeat(" ", taskPrefix), task.Name)
if len(task.Annotations) != 0 {
out += fmt.Sprintf(" [reset](%s)", colorAnnotations(task.Annotations))
}
if task.Type == "None" {
return out
} else if (task.Type == "Deleted" || task.Type == "Added") && !verbose {
// Exit early if the task was only added or deleted and verbose output isn't requested
return out
} else {
out += "\n"
}
subStartPrefix := startPrefix + 2
longestField, longestMarker := getLongestPrefixes(task.Fields, task.Objects)
out += alignedFieldAndObjects(task.Fields, task.Objects, subStartPrefix, longestField, longestMarker)
return out
}
// formatObjectDiff produces an annotated diff of an object. startPrefix is the
// number of spaces to prefix the output of the object and keyPrefix is the number
// of spaces to put between the marker and object name output.
func formatObjectDiff(diff *api.ObjectDiff, startPrefix, keyPrefix int) string {
start := strings.Repeat(" ", startPrefix)
marker, _ := getDiffString(diff.Type)
out := fmt.Sprintf("%s%s%s%s {\n", start, marker, strings.Repeat(" ", keyPrefix), diff.Name)
// Determine the length of the longest name and longest diff marker to
// properly align names and values
longestField, longestMarker := getLongestPrefixes(diff.Fields, diff.Objects)
subStartPrefix := startPrefix + 2
out += alignedFieldAndObjects(diff.Fields, diff.Objects, subStartPrefix, longestField, longestMarker)
return fmt.Sprintf("%s\n%s}", out, start)
}
// formatFieldDiff produces an annotated diff of a field. startPrefix is the
// number of spaces to prefix the output of the field, keyPrefix is the number
// of spaces to put between the marker and field name output and valuePrefix is
// the number of spaces to put in front of the value for aligning values.
func formatFieldDiff(diff *api.FieldDiff, startPrefix, keyPrefix, valuePrefix int) string {
marker, _ := getDiffString(diff.Type)
out := fmt.Sprintf("%s%s%s%s: %s",
strings.Repeat(" ", startPrefix),
marker, strings.Repeat(" ", keyPrefix),
diff.Name,
strings.Repeat(" ", valuePrefix))
switch diff.Type {
case "Added":
out += fmt.Sprintf("%q", diff.New)
case "Deleted":
out += fmt.Sprintf("%q", diff.Old)
case "Edited":
out += fmt.Sprintf("%q => %q", diff.Old, diff.New)
default:
out += fmt.Sprintf("%q", diff.New)
}
// Color the annotations where possible
if l := len(diff.Annotations); l != 0 {
out += fmt.Sprintf(" (%s)", colorAnnotations(diff.Annotations))
}
return out
}
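// As a rough illustration (color codes and alignment padding omitted), an
// "Edited" field named "count" changing from "1" to "3" renders along the
// lines of:
//
//   +/- count: "1" => "3"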
// alignedFieldAndObjects is a helper that formats fields and objects with
// proper alignment.
func alignedFieldAndObjects(fields []*api.FieldDiff, objects []*api.ObjectDiff,
startPrefix, longestField, longestMarker int) string {
var out string
numFields := len(fields)
numObjects := len(objects)
haveObjects := numObjects != 0
for i, field := range fields {
_, mLength := getDiffString(field.Type)
kPrefix := longestMarker - mLength
vPrefix := longestField - len(field.Name)
out += formatFieldDiff(field, startPrefix, kPrefix, vPrefix)
// Avoid a dangling new line
if i+1 != numFields || haveObjects {
out += "\n"
}
}
for i, object := range objects {
_, mLength := getDiffString(object.Type)
kPrefix := longestMarker - mLength
out += formatObjectDiff(object, startPrefix, kPrefix)
// Avoid a dangling new line
if i+1 != numObjects {
out += "\n"
}
}
return out
}
// getLongestPrefixes takes a list of fields and objects and determines the
// longest field name and the longest marker.
func getLongestPrefixes(fields []*api.FieldDiff, objects []*api.ObjectDiff) (longestField, longestMarker int) {
for _, field := range fields {
if l := len(field.Name); l > longestField {
longestField = l
}
if _, l := getDiffString(field.Type); l > longestMarker {
longestMarker = l
}
}
for _, obj := range objects {
if _, l := getDiffString(obj.Type); l > longestMarker {
longestMarker = l
}
}
return longestField, longestMarker
}
// getDiffString returns a colored diff marker and the length of the string
// without color annotations.
func getDiffString(diffType string) (string, int) {
switch diffType {
case "Added":
return "[green]+[reset] ", 2
case "Deleted":
return "[red]-[reset] ", 2
case "Edited":
return "[light_yellow]+/-[reset] ", 4
default:
return "", 0
}
}
// colorAnnotations returns a comma-separated list of the annotations, colored
// where possible.
func colorAnnotations(annotations []string) string {
l := len(annotations)
if l == 0 {
return ""
}
colored := make([]string, l)
for i, annotation := range annotations {
switch annotation {
case "forces create":
colored[i] = fmt.Sprintf("[green]%s[reset]", annotation)
case "forces destroy":
colored[i] = fmt.Sprintf("[red]%s[reset]", annotation)
case "forces in-place update":
colored[i] = fmt.Sprintf("[cyan]%s[reset]", annotation)
case "forces create/destroy update":
colored[i] = fmt.Sprintf("[yellow]%s[reset]", annotation)
default:
colored[i] = annotation
}
}
return strings.Join(colored, ", ")
}

View File

@ -1,248 +0,0 @@
package command
import (
"bytes"
"encoding/gob"
"encoding/json"
"fmt"
"regexp"
"strconv"
"strings"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
)
var (
// enforceIndexRegex is a regular expression which extracts the enforcement error
enforceIndexRegex = regexp.MustCompile(`\((Enforcing job modify index.*)\)`)
)
type RunCommand struct {
Meta
JobGetter
}
func (c *RunCommand) Help() string {
helpText := `
Usage: nomad run [options] <path>
Starts running a new job or updates an existing job using
the specification located at <path>. This is the main command
used to interact with Nomad.
If the supplied path is "-", the jobfile is read from stdin. Otherwise
it is read from the file at the supplied path or downloaded and
read from URL specified.
Upon successful job submission, this command will immediately
enter an interactive monitor. This is useful to watch Nomad's
internals make scheduling decisions and place the submitted work
onto nodes. The monitor will end once job placement is done. It
is safe to exit the monitor early using ctrl+c.
On successful job submission and scheduling, exit code 0 will be
returned. If there are job placement issues encountered
(unsatisfiable constraints, resource exhaustion, etc), then the
exit code will be 2. Any other errors, including client connection
issues or internal errors, are indicated by exit code 1.
If the job specifies a region, it overrides the -region flag and the
NOMAD_REGION environment variable, and the job's region is used.
General Options:
` + generalOptionsUsage() + `
Run Options:
-check-index
If set, the job is only registered or updated if the passed
job modify index matches the server-side version. If a check-index value of
zero is passed, the job is only registered if it does not yet exist. If a
non-zero value is passed, it ensures that the job is being updated from a
known state. The use of this flag is most common in conjunction with plan
command.
-detach
Return immediately instead of entering monitor mode. After job submission,
the evaluation ID will be printed to the screen, which can be used to
examine the evaluation using the eval-status command.
-verbose
Display full information.
-output
Output the JSON that would be submitted to the HTTP API without submitting
the job.
`
return strings.TrimSpace(helpText)
}
func (c *RunCommand) Synopsis() string {
return "Run a new job or update an existing job"
}
func (c *RunCommand) Run(args []string) int {
var detach, verbose, output bool
var checkIndexStr string
flags := c.Meta.FlagSet("run", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&detach, "detach", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&output, "output", false, "")
flags.StringVar(&checkIndexStr, "check-index", "", "")
if err := flags.Parse(args); err != nil {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Check that we got exactly one argument
args = flags.Args()
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 1
}
// Initialize any fields that need to be.
job.Canonicalize()
// Check that the job is valid
if err := job.Validate(); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %v", err))
return 1
}
// Check if the job is periodic.
periodic := job.IsPeriodic()
// Convert it to something we can use
apiJob, err := convertStructJob(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 1
}
if output {
req := api.RegisterJobRequest{Job: apiJob}
buf, err := json.MarshalIndent(req, "", " ")
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 1
}
c.Ui.Output(string(buf))
return 0
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Force the region to be that of the job.
if r := job.Region; r != "" {
client.SetRegion(r)
}
// Parse the check-index
checkIndex, enforce, err := parseCheckIndex(checkIndexStr)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error parsing check-index value %q: %v", checkIndexStr, err))
return 1
}
// Submit the job
var evalID string
if enforce {
evalID, _, err = client.Jobs().EnforceRegister(apiJob, checkIndex, nil)
} else {
evalID, _, err = client.Jobs().Register(apiJob, nil)
}
if err != nil {
if strings.Contains(err.Error(), api.RegisterEnforceIndexErrPrefix) {
// Format the error specially if the error is due to index
// enforcement
matches := enforceIndexRegex.FindStringSubmatch(err.Error())
if len(matches) == 2 {
c.Ui.Error(matches[1]) // The matched group
c.Ui.Error("Job not updated")
return 1
}
}
c.Ui.Error(fmt.Sprintf("Error submitting job: %s", err))
return 1
}
// Check if we should enter monitor mode
if detach || periodic {
c.Ui.Output("Job registration successful")
if periodic {
now := time.Now().UTC()
next := job.Periodic.Next(now)
c.Ui.Output(fmt.Sprintf("Approximate next launch time: %s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second)))
} else {
c.Ui.Output("Evaluation ID: " + evalID)
}
return 0
}
// Detach was not specified, so start monitoring
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evalID, false)
}
// parseCheckIndex parses the check-index flag and returns the index, whether it
// was set and potentially an error during parsing.
func parseCheckIndex(input string) (uint64, bool, error) {
if input == "" {
return 0, false, nil
}
u, err := strconv.ParseUint(input, 10, 64)
return u, true, err
}
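// Behavior sketch based on the cases above: parseCheckIndex("") returns
// (0, false, nil), so no index is enforced; parseCheckIndex("0") returns
// (0, true, nil) and the job is only registered if it does not already exist;
// parseCheckIndex("42") returns (42, true, nil) and registration is rejected
// unless the server-side job modify index is 42.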
// convertStructJob is used to take a *structs.Job and convert it to an *api.Job.
// This function is just a hammer and probably needs to be revisited.
func convertStructJob(in *structs.Job) (*api.Job, error) {
gob.Register([]map[string]interface{}{})
gob.Register([]interface{}{})
var apiJob *api.Job
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(in); err != nil {
return nil, err
}
if err := gob.NewDecoder(buf).Decode(&apiJob); err != nil {
return nil, err
}
return apiJob, nil
}

View File

@ -1,60 +0,0 @@
package command
import (
"fmt"
"strings"
)
type ServerForceLeaveCommand struct {
Meta
}
func (c *ServerForceLeaveCommand) Help() string {
helpText := `
Usage: nomad server-force-leave [options] <node>
Forces a server to enter the "left" state. This can be used to
eject nodes which have failed and will not rejoin the cluster.
Note that if the member is actually still alive, it will
eventually rejoin the cluster again.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
func (c *ServerForceLeaveCommand) Synopsis() string {
return "Force a server into the 'left' state"
}
func (c *ServerForceLeaveCommand) Run(args []string) int {
flags := c.Meta.FlagSet("server-force-leave", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one node
args = flags.Args()
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
node := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Call force-leave on the node
if err := client.Agent().ForceLeave(node); err != nil {
c.Ui.Error(fmt.Sprintf("Error force-leaving server %s: %s", node, err))
return 1
}
return 0
}

View File

@ -1,64 +0,0 @@
package command
import (
"fmt"
"strings"
)
type ServerJoinCommand struct {
Meta
}
func (c *ServerJoinCommand) Help() string {
helpText := `
Usage: nomad server-join [options] <addr> [<addr>...]
Joins the local server to one or more Nomad servers. Joining is
only required for server nodes, and only needs to succeed
against one or more of the provided addresses. Once joined, the
gossip layer will handle discovery of the other server nodes in
the cluster.
General Options:
` + generalOptionsUsage()
return strings.TrimSpace(helpText)
}
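// Illustrative only (the addresses are hypothetical); joining the local
// server to an existing cluster per the help text above:
//
//   nomad server-join 10.0.0.2 10.0.0.3
//
// The join only needs to succeed against one address; gossip discovers the
// remaining servers.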
func (c *ServerJoinCommand) Synopsis() string {
return "Join server nodes together"
}
func (c *ServerJoinCommand) Run(args []string) int {
flags := c.Meta.FlagSet("server-join", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got at least one node
args = flags.Args()
if len(args) < 1 {
c.Ui.Error(c.Help())
return 1
}
nodes := args
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Attempt the join
n, err := client.Agent().Join(nodes...)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error joining: %s", err))
return 1
}
// Success
c.Ui.Output(fmt.Sprintf("Joined %d servers successfully", n))
return 0
}

View File

@ -1,174 +0,0 @@
package command
import (
"fmt"
"sort"
"strings"
"github.com/hashicorp/nomad/api"
"github.com/ryanuber/columnize"
)
type ServerMembersCommand struct {
Meta
}
func (c *ServerMembersCommand) Help() string {
helpText := `
Usage: nomad server-members [options]
Display a list of the known servers and their status.
General Options:
` + generalOptionsUsage() + `
Server Members Options:
-detailed
Show detailed information about each member. This dumps
a raw set of tags which shows more information than the
default output format.
`
return strings.TrimSpace(helpText)
}
func (c *ServerMembersCommand) Synopsis() string {
return "Display a list of known servers and their status"
}
func (c *ServerMembersCommand) Run(args []string) int {
var detailed bool
flags := c.Meta.FlagSet("server-members", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&detailed, "detailed", false, "Show detailed output")
if err := flags.Parse(args); err != nil {
return 1
}
// Check for extra arguments
args = flags.Args()
if len(args) != 0 {
c.Ui.Error(c.Help())
return 1
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Query the members
mem, err := client.Agent().Members()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying servers: %s", err))
return 1
}
// Sort the members
sort.Sort(api.AgentMembersNameSort(mem))
// Determine the leaders per region.
leaders, err := regionLeaders(client, mem)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error determining leaders: %s", err))
return 1
}
// Format the list
var out []string
if detailed {
out = detailedOutput(mem)
} else {
out = standardOutput(mem, leaders)
}
// Dump the list
c.Ui.Output(columnize.SimpleFormat(out))
return 0
}
func standardOutput(mem []*api.AgentMember, leaders map[string]string) []string {
// Format the members list
members := make([]string, len(mem)+1)
members[0] = "Name|Address|Port|Status|Leader|Protocol|Build|Datacenter|Region"
for i, member := range mem {
reg := member.Tags["region"]
regLeader, ok := leaders[reg]
isLeader := false
if ok {
if regLeader == fmt.Sprintf("%s:%s", member.Addr, member.Tags["port"]) {
isLeader = true
}
}
members[i+1] = fmt.Sprintf("%s|%s|%d|%s|%t|%d|%s|%s|%s",
member.Name,
member.Addr,
member.Port,
member.Status,
isLeader,
member.ProtocolCur,
member.Tags["build"],
member.Tags["dc"],
member.Tags["region"])
}
return members
}
func detailedOutput(mem []*api.AgentMember) []string {
// Format the members list
members := make([]string, len(mem)+1)
members[0] = "Name|Address|Port|Tags"
for i, member := range mem {
// Format the tags
tagPairs := make([]string, 0, len(member.Tags))
for k, v := range member.Tags {
tagPairs = append(tagPairs, fmt.Sprintf("%s=%s", k, v))
}
tags := strings.Join(tagPairs, ",")
members[i+1] = fmt.Sprintf("%s|%s|%d|%s",
member.Name,
member.Addr,
member.Port,
tags)
}
return members
}
// regionLeaders returns a map of regions to the IP of the member that is the
// leader.
func regionLeaders(client *api.Client, mem []*api.AgentMember) (map[string]string, error) {
// Determine the unique regions.
leaders := make(map[string]string)
regions := make(map[string]struct{})
for _, m := range mem {
regions[m.Tags["region"]] = struct{}{}
}
if len(regions) == 0 {
return leaders, nil
}
status := client.Status()
for reg := range regions {
l, err := status.RegionLeader(reg)
if err != nil {
// This error means that region has no leader.
if strings.Contains(err.Error(), "No cluster leader") {
continue
}
return nil, err
}
leaders[reg] = l
}
return leaders, nil
}

View File

@ -1,378 +0,0 @@
package command
import (
"bytes"
"encoding/gob"
"fmt"
"sort"
"strings"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
)
const (
// maxFailedTGs is the maximum number of task groups we show failure reasons
// for before deferring to eval-status
maxFailedTGs = 5
)
type StatusCommand struct {
Meta
length int
evals bool
verbose bool
}
func (c *StatusCommand) Help() string {
helpText := `
Usage: nomad status [options] <job>
Display status information about jobs. If no job ID is given,
a list of all known jobs will be dumped.
General Options:
` + generalOptionsUsage() + `
Status Options:
-short
Display short output. Used only when a single job is being
queried, and drops verbose information about allocations.
-evals
Display the evaluations associated with the job.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
func (c *StatusCommand) Synopsis() string {
return "Display status information about jobs"
}
func (c *StatusCommand) Run(args []string) int {
var short bool
flags := c.Meta.FlagSet("status", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&short, "short", false, "")
flags.BoolVar(&c.evals, "evals", false, "")
flags.BoolVar(&c.verbose, "verbose", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we either got no jobs or exactly one.
args = flags.Args()
if len(args) > 1 {
c.Ui.Error(c.Help())
return 1
}
// Truncate the id unless full length is requested
c.length = shortId
if c.verbose {
c.length = fullId
}
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Invoke list mode if no job ID.
if len(args) == 0 {
jobs, _, err := client.Jobs().List(nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying jobs: %s", err))
return 1
}
if len(jobs) == 0 {
// Emit a friendly message if we have no jobs
c.Ui.Output("No running jobs")
} else {
c.Ui.Output(createStatusListOutput(jobs))
}
return 0
}
// Try querying the job
jobID := args[0]
jobs, _, err := client.Jobs().PrefixList(jobID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying job: %s", err))
return 1
}
if len(jobs) == 0 {
c.Ui.Error(fmt.Sprintf("No job(s) with prefix or id %q found", jobID))
return 1
}
if len(jobs) > 1 && strings.TrimSpace(jobID) != jobs[0].ID {
c.Ui.Output(fmt.Sprintf("Prefix matched multiple jobs\n\n%s", createStatusListOutput(jobs)))
return 0
}
// Prefix lookup matched a single job
job, _, err := client.Jobs().Info(jobs[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error querying job: %s", err))
return 1
}
// Check if it is periodic
sJob, err := convertApiJob(job)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error converting job: %s", err))
return 1
}
periodic := sJob.IsPeriodic()
// Format the job info
basic := []string{
fmt.Sprintf("ID|%s", job.ID),
fmt.Sprintf("Name|%s", job.Name),
fmt.Sprintf("Type|%s", job.Type),
fmt.Sprintf("Priority|%d", job.Priority),
fmt.Sprintf("Datacenters|%s", strings.Join(job.Datacenters, ",")),
fmt.Sprintf("Status|%s", job.Status),
fmt.Sprintf("Periodic|%v", periodic),
}
if periodic {
now := time.Now().UTC()
next := sJob.Periodic.Next(now)
basic = append(basic, fmt.Sprintf("Next Periodic Launch|%s",
fmt.Sprintf("%s (%s from now)",
formatTime(next), formatTimeDifference(now, next, time.Second))))
}
c.Ui.Output(formatKV(basic))
// Exit early
if short {
return 0
}
// Print periodic job information
if periodic {
if err := c.outputPeriodicInfo(client, job); err != nil {
c.Ui.Error(err.Error())
return 1
}
return 0
}
if err := c.outputJobInfo(client, job); err != nil {
c.Ui.Error(err.Error())
return 1
}
return 0
}
// outputPeriodicInfo prints information about the passed periodic job. If a
// request fails, an error is returned.
func (c *StatusCommand) outputPeriodicInfo(client *api.Client, job *api.Job) error {
// Generate the prefix that matches launched jobs from the periodic job.
prefix := fmt.Sprintf("%s%s", job.ID, structs.PeriodicLaunchSuffix)
children, _, err := client.Jobs().PrefixList(prefix)
if err != nil {
return fmt.Errorf("Error querying job: %s", err)
}
if len(children) == 0 {
c.Ui.Output("\nNo instances of periodic job found")
return nil
}
out := make([]string, 1)
out[0] = "ID|Status"
for _, child := range children {
// Ensure that we are only showing jobs whose parent is the requested
// job.
if child.ParentID != job.ID {
continue
}
out = append(out, fmt.Sprintf("%s|%s",
child.ID,
child.Status))
}
c.Ui.Output(fmt.Sprintf("\nPreviously launched jobs:\n%s", formatList(out)))
return nil
}
// outputJobInfo prints information about the passed non-periodic job. If a
// request fails, an error is returned.
func (c *StatusCommand) outputJobInfo(client *api.Client, job *api.Job) error {
var evals, allocs []string
// Query the allocations
jobAllocs, _, err := client.Jobs().Allocations(job.ID, nil)
if err != nil {
return fmt.Errorf("Error querying job allocations: %s", err)
}
// Query the evaluations
jobEvals, _, err := client.Jobs().Evaluations(job.ID, nil)
if err != nil {
return fmt.Errorf("Error querying job evaluations: %s", err)
}
// Query the summary
summary, _, err := client.Jobs().Summary(job.ID, nil)
if err != nil {
return fmt.Errorf("Error querying job summary: %s", err)
}
// Format the summary
c.Ui.Output(c.Colorize().Color("\n[bold]Summary[reset]"))
if summary != nil {
summaries := make([]string, len(summary.Summary)+1)
summaries[0] = "Task Group|Queued|Starting|Running|Failed|Complete|Lost"
taskGroups := make([]string, 0, len(summary.Summary))
for taskGroup := range summary.Summary {
taskGroups = append(taskGroups, taskGroup)
}
sort.Strings(taskGroups)
for idx, taskGroup := range taskGroups {
tgs := summary.Summary[taskGroup]
summaries[idx+1] = fmt.Sprintf("%s|%d|%d|%d|%d|%d|%d",
taskGroup, tgs.Queued, tgs.Starting,
tgs.Running, tgs.Failed,
tgs.Complete, tgs.Lost,
)
}
c.Ui.Output(formatList(summaries))
}
// Determine the latest evaluation with failures whose follow-up hasn't
// completed; this is done while formatting
var latestFailedPlacement *api.Evaluation
blockedEval := false
// Format the evals
evals = make([]string, len(jobEvals)+1)
evals[0] = "ID|Priority|Triggered By|Status|Placement Failures"
for i, eval := range jobEvals {
failures, _ := evalFailureStatus(eval)
evals[i+1] = fmt.Sprintf("%s|%d|%s|%s|%s",
limit(eval.ID, c.length),
eval.Priority,
eval.TriggeredBy,
eval.Status,
failures,
)
if eval.Status == "blocked" {
blockedEval = true
}
if len(eval.FailedTGAllocs) == 0 {
// Skip evals without failures
continue
}
if latestFailedPlacement == nil || latestFailedPlacement.CreateIndex < eval.CreateIndex {
latestFailedPlacement = eval
}
}
if c.verbose || c.evals {
c.Ui.Output(c.Colorize().Color("\n[bold]Evaluations[reset]"))
c.Ui.Output(formatList(evals))
}
if blockedEval && latestFailedPlacement != nil {
c.outputFailedPlacements(latestFailedPlacement)
}
// Format the allocs
c.Ui.Output(c.Colorize().Color("\n[bold]Allocations[reset]"))
if len(jobAllocs) > 0 {
allocs = make([]string, len(jobAllocs)+1)
allocs[0] = "ID|Eval ID|Node ID|Task Group|Desired|Status|Created At"
for i, alloc := range jobAllocs {
allocs[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%s|%s",
limit(alloc.ID, c.length),
limit(alloc.EvalID, c.length),
limit(alloc.NodeID, c.length),
alloc.TaskGroup,
alloc.DesiredStatus,
alloc.ClientStatus,
formatUnixNanoTime(alloc.CreateTime))
}
c.Ui.Output(formatList(allocs))
} else {
c.Ui.Output("No allocations placed")
}
return nil
}
func (c *StatusCommand) outputFailedPlacements(failedEval *api.Evaluation) {
if failedEval == nil || len(failedEval.FailedTGAllocs) == 0 {
return
}
c.Ui.Output(c.Colorize().Color("\n[bold]Placement Failure[reset]"))
sorted := sortedTaskGroupFromMetrics(failedEval.FailedTGAllocs)
for i, tg := range sorted {
if i >= maxFailedTGs {
break
}
c.Ui.Output(fmt.Sprintf("Task Group %q:", tg))
metrics := failedEval.FailedTGAllocs[tg]
c.Ui.Output(formatAllocMetrics(metrics, false, " "))
if i != len(sorted)-1 {
c.Ui.Output("")
}
}
if len(sorted) > maxFailedTGs {
trunc := fmt.Sprintf("\nPlacement failures truncated. To see remainder run:\nnomad eval-status %s", failedEval.ID)
c.Ui.Output(trunc)
}
}
// convertApiJob is used to take an *api.Job and convert it to a *structs.Job.
// This function is just a hammer and probably needs to be revisited.
func convertApiJob(in *api.Job) (*structs.Job, error) {
gob.Register(map[string]interface{}{})
gob.Register([]interface{}{})
var structJob *structs.Job
buf := new(bytes.Buffer)
if err := gob.NewEncoder(buf).Encode(in); err != nil {
return nil, err
}
if err := gob.NewDecoder(buf).Decode(&structJob); err != nil {
return nil, err
}
return structJob, nil
}
// list general information about a list of jobs
func createStatusListOutput(jobs []*api.JobListStub) string {
out := make([]string, len(jobs)+1)
out[0] = "ID|Type|Priority|Status"
for i, job := range jobs {
out[i+1] = fmt.Sprintf("%s|%s|%d|%s",
job.ID,
job.Type,
job.Priority,
job.Status)
}
return formatList(out)
}

View File

@ -1,154 +0,0 @@
package command
import (
"fmt"
"strings"
)
type StopCommand struct {
Meta
}
func (c *StopCommand) Help() string {
helpText := `
Usage: nomad stop [options] <job>
Stop an existing job. This command is used to signal allocations
to shut down for the given job ID. Upon successful deregistration,
an interactive monitor session will start to display log lines as
the job unwinds its allocations and completes shutting down. It
is safe to exit the monitor early using ctrl+c.
General Options:
` + generalOptionsUsage() + `
Stop Options:
-detach
Return immediately instead of entering monitor mode. After the
deregister command is submitted, a new evaluation ID is printed to the
screen, which can be used to examine the evaluation using the eval-status
command.
-yes
Automatic yes to prompts.
-verbose
Display full information.
`
return strings.TrimSpace(helpText)
}
func (c *StopCommand) Synopsis() string {
return "Stop a running job"
}
func (c *StopCommand) Run(args []string) int {
var detach, verbose, autoYes bool
flags := c.Meta.FlagSet("stop", FlagSetClient)
flags.Usage = func() { c.Ui.Output(c.Help()) }
flags.BoolVar(&detach, "detach", false, "")
flags.BoolVar(&verbose, "verbose", false, "")
flags.BoolVar(&autoYes, "yes", false, "")
if err := flags.Parse(args); err != nil {
return 1
}
// Truncate the id unless full length is requested
length := shortId
if verbose {
length = fullId
}
// Check that we got exactly one job
args = flags.Args()
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
jobID := args[0]
// Get the HTTP client
client, err := c.Meta.Client()
if err != nil {
c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err))
return 1
}
// Check if the job exists
jobs, _, err := client.Jobs().PrefixList(jobID)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err))
return 1
}
if len(jobs) == 0 {
c.Ui.Error(fmt.Sprintf("No job(s) with prefix or id %q found", jobID))
return 1
}
if len(jobs) > 1 && strings.TrimSpace(jobID) != jobs[0].ID {
out := make([]string, len(jobs)+1)
out[0] = "ID|Type|Priority|Status"
for i, job := range jobs {
out[i+1] = fmt.Sprintf("%s|%s|%d|%s",
job.ID,
job.Type,
job.Priority,
job.Status)
}
c.Ui.Output(fmt.Sprintf("Prefix matched multiple jobs\n\n%s", formatList(out)))
return 0
}
// Prefix lookup matched a single job
job, _, err := client.Jobs().Info(jobs[0].ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err))
return 1
}
// Confirm the stop if the job was a prefix match.
if jobID != job.ID && !autoYes {
question := fmt.Sprintf("Are you sure you want to stop job %q? [y/N]", job.ID)
answer, err := c.Ui.Ask(question)
if err != nil {
c.Ui.Error(fmt.Sprintf("Failed to parse answer: %v", err))
return 1
}
if answer == "" || strings.ToLower(answer)[0] == 'n' {
// No case
c.Ui.Output("Cancelling job stop")
return 0
} else if strings.ToLower(answer)[0] == 'y' && len(answer) > 1 {
// Non exact match yes
c.Ui.Output("For confirmation, an exact y is required.")
return 0
} else if answer != "y" {
c.Ui.Output("No confirmation detected. For confirmation, an exact 'y' is required.")
return 1
}
}
// Invoke the stop
evalID, _, err := client.Jobs().Deregister(job.ID, nil)
if err != nil {
c.Ui.Error(fmt.Sprintf("Error deregistering job: %s", err))
return 1
}
// If we are stopping a periodic job there won't be an evalID.
if evalID == "" {
return 0
}
if detach {
c.Ui.Output(evalID)
return 0
}
// Start monitoring the stop eval
mon := newMonitor(c.Ui, client, length)
return mon.monitor(evalID, false)
}

View File

@ -1,44 +0,0 @@
package command
import (
"os"
"strings"
"github.com/hashicorp/go-plugin"
"github.com/hashicorp/nomad/client/driver"
)
type SyslogPluginCommand struct {
Meta
}
func (e *SyslogPluginCommand) Help() string {
helpText := `
This is a command used by Nomad internally to launch a syslog collector.
`
return strings.TrimSpace(helpText)
}
func (s *SyslogPluginCommand) Synopsis() string {
return "internal - lanch a syslog collector plugin"
}
func (s *SyslogPluginCommand) Run(args []string) int {
if len(args) == 0 {
s.Ui.Error("log output file isn't provided")
return 1
}
logFileName := args[0]
stdo, err := os.OpenFile(logFileName, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666)
if err != nil {
s.Ui.Error(err.Error())
return 1
}
plugin.Serve(&plugin.ServeConfig{
HandshakeConfig: driver.HandshakeConfig,
Plugins: driver.GetPluginMap(stdo),
})
return 0
}

View File

@ -1,3 +0,0 @@
import sys
sys.exit(int(sys.argv[1]))

View File

@ -1,64 +0,0 @@
package command
import (
"fmt"
"strings"
)
type ValidateCommand struct {
Meta
JobGetter
}
func (c *ValidateCommand) Help() string {
helpText := `
Usage: nomad validate [options] <file>
Checks if a given HCL job file has a valid specification. This can be used to
check for any syntax errors or validation problems with a job.
If the supplied path is "-", the jobfile is read from stdin. Otherwise
it is read from the file at the supplied path or downloaded and
read from URL specified.
`
return strings.TrimSpace(helpText)
}
func (c *ValidateCommand) Synopsis() string {
return "Checks if a given job specification is valid"
}
func (c *ValidateCommand) Run(args []string) int {
flags := c.Meta.FlagSet("validate", FlagSetNone)
flags.Usage = func() { c.Ui.Output(c.Help()) }
if err := flags.Parse(args); err != nil {
return 1
}
// Check that we got exactly one node
args = flags.Args()
if len(args) != 1 {
c.Ui.Error(c.Help())
return 1
}
// Get Job struct from Jobfile
job, err := c.JobGetter.StructJob(args[0])
if err != nil {
c.Ui.Error(fmt.Sprintf("Error getting job struct: %s", err))
return 1
}
// Initialize any fields that need to be.
job.Canonicalize()
// Check that the job is valid
if err := job.Validate(); err != nil {
c.Ui.Error(fmt.Sprintf("Error validating job: %s", err))
return 1
}
// Done!
c.Ui.Output("Job validation successful")
return 0
}

View File

@ -1,40 +0,0 @@
package command
import (
"bytes"
"fmt"
"github.com/mitchellh/cli"
)
// VersionCommand is a Command implementation that prints the version.
type VersionCommand struct {
Revision string
Version string
VersionPrerelease string
Ui cli.Ui
}
func (c *VersionCommand) Help() string {
return ""
}
func (c *VersionCommand) Run(_ []string) int {
var versionString bytes.Buffer
fmt.Fprintf(&versionString, "Nomad v%s", c.Version)
if c.VersionPrerelease != "" {
fmt.Fprintf(&versionString, "-%s", c.VersionPrerelease)
if c.Revision != "" {
fmt.Fprintf(&versionString, " (%s)", c.Revision)
}
}
c.Ui.Output(versionString.String())
return 0
}
func (c *VersionCommand) Synopsis() string {
return "Prints the Nomad version"
}

View File

@ -1,166 +0,0 @@
package main
import (
"os"
"github.com/hashicorp/nomad/command"
"github.com/hashicorp/nomad/command/agent"
"github.com/mitchellh/cli"
)
// Commands returns the mapping of CLI commands for Nomad. The meta
// parameter lets you set meta options for all commands.
func Commands(metaPtr *command.Meta) map[string]cli.CommandFactory {
if metaPtr == nil {
metaPtr = new(command.Meta)
}
meta := *metaPtr
if meta.Ui == nil {
meta.Ui = &cli.BasicUi{
Reader: os.Stdin,
Writer: os.Stdout,
ErrorWriter: os.Stderr,
}
}
return map[string]cli.CommandFactory{
"alloc-status": func() (cli.Command, error) {
return &command.AllocStatusCommand{
Meta: meta,
}, nil
},
"agent": func() (cli.Command, error) {
return &agent.Command{
Revision: GitCommit,
Version: Version,
VersionPrerelease: VersionPrerelease,
Ui: meta.Ui,
ShutdownCh: make(chan struct{}),
}, nil
},
"agent-info": func() (cli.Command, error) {
return &command.AgentInfoCommand{
Meta: meta,
}, nil
},
"check": func() (cli.Command, error) {
return &command.AgentCheckCommand{
Meta: meta,
}, nil
},
"client-config": func() (cli.Command, error) {
return &command.ClientConfigCommand{
Meta: meta,
}, nil
},
"eval-status": func() (cli.Command, error) {
return &command.EvalStatusCommand{
Meta: meta,
}, nil
},
"executor": func() (cli.Command, error) {
return &command.ExecutorPluginCommand{
Meta: meta,
}, nil
},
"fs": func() (cli.Command, error) {
return &command.FSCommand{
Meta: meta,
}, nil
},
"init": func() (cli.Command, error) {
return &command.InitCommand{
Meta: meta,
}, nil
},
"inspect": func() (cli.Command, error) {
return &command.InspectCommand{
Meta: meta,
}, nil
},
"logs": func() (cli.Command, error) {
return &command.LogsCommand{
Meta: meta,
}, nil
},
"node-drain": func() (cli.Command, error) {
return &command.NodeDrainCommand{
Meta: meta,
}, nil
},
"node-status": func() (cli.Command, error) {
return &command.NodeStatusCommand{
Meta: meta,
}, nil
},
"plan": func() (cli.Command, error) {
return &command.PlanCommand{
Meta: meta,
}, nil
},
"run": func() (cli.Command, error) {
return &command.RunCommand{
Meta: meta,
}, nil
},
"syslog": func() (cli.Command, error) {
return &command.SyslogPluginCommand{
Meta: meta,
}, nil
},
"server-force-leave": func() (cli.Command, error) {
return &command.ServerForceLeaveCommand{
Meta: meta,
}, nil
},
"server-join": func() (cli.Command, error) {
return &command.ServerJoinCommand{
Meta: meta,
}, nil
},
"server-members": func() (cli.Command, error) {
return &command.ServerMembersCommand{
Meta: meta,
}, nil
},
"status": func() (cli.Command, error) {
return &command.StatusCommand{
Meta: meta,
}, nil
},
"stop": func() (cli.Command, error) {
return &command.StopCommand{
Meta: meta,
}, nil
},
"validate": func() (cli.Command, error) {
return &command.ValidateCommand{
Meta: meta,
}, nil
},
"version": func() (cli.Command, error) {
ver := Version
rel := VersionPrerelease
if GitDescribe != "" {
ver = GitDescribe
// Trim off a leading 'v'; we append it anyway.
if ver[0] == 'v' {
ver = ver[1:]
}
}
if GitDescribe == "" && rel == "" && VersionPrerelease != "" {
rel = "dev"
}
return &command.VersionCommand{
Revision: GitCommit,
Version: ver,
VersionPrerelease: rel,
Ui: meta.Ui,
}, nil
},
}
}
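A minimal usage sketch for the command factory map above, assuming the mitchellh/cli package; the runCLI helper and its error message are illustrative and not part of this repository:
package main
import (
	"fmt"
	"os"
	"github.com/mitchellh/cli"
)
// runCLI wires a command factory map into a cli.CLI, runs it, and
// returns the exit code to hand to os.Exit.
func runCLI(name, version string, commands map[string]cli.CommandFactory) int {
	c := &cli.CLI{
		Name:     name,
		Version:  version,
		Args:     os.Args[1:],
		Commands: commands,
	}
	exitCode, err := c.Run()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error executing CLI: %s\n", err)
		return 1
	}
	return exitCode
}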


@ -1,115 +0,0 @@
package main
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"strconv"
"time"
"github.com/hashicorp/nomad/api"
"github.com/hashicorp/nomad/nomad/structs"
)
func main() {
client, err := api.NewClient(api.DefaultConfig())
if err != nil {
fmt.Println(err.Error())
return
}
total := 0
if len(os.Args) != 2 {
fmt.Println("need 1 arg")
return
}
if total, err = strconv.Atoi(os.Args[1]); err != nil {
fmt.Println("arg 1 must be number")
return
}
fh, err := ioutil.TempFile("", "bench")
if err != nil {
fmt.Println(err.Error())
return
}
defer os.Remove(fh.Name())
jobContent := fmt.Sprintf(job, total)
if _, err := fh.WriteString(jobContent); err != nil {
fmt.Println(err.Error())
return
}
fh.Close()
isRunning := false
allocClient := client.Allocations()
cmd := exec.Command("nomad", "run", fh.Name())
if err := cmd.Run(); err != nil {
fmt.Println("nomad run failed: " + err.Error())
return
}
start := time.Now()
last := 0
fmt.Printf("benchmarking %d allocations\n", total)
opts := &api.QueryOptions{AllowStale: true}
for {
time.Sleep(100 * time.Millisecond)
allocs, _, err := allocClient.List(opts)
if err != nil {
fmt.Println(err.Error())
// keep going to paper over minor errors
continue
}
now := time.Now()
running := 0
for _, alloc := range allocs {
if alloc.ClientStatus == structs.AllocClientStatusRunning {
if !isRunning {
fmt.Printf("time to first running: %s\n", now.Sub(start))
isRunning = true
}
running++
}
}
if last != running {
fmt.Printf("%d running after %s\n", running, now.Sub(start))
}
last = running
if running == total {
return
}
}
}
const job = `
job "bench" {
datacenters = ["ams2", "ams3", "nyc3", "sfo1"]
group "cache" {
count = %d
task "redis" {
driver = "docker"
config {
image = "redis"
}
resources {
cpu = 100
memory = 100
}
}
}
}
`


@ -1,5 +0,0 @@
data_dir = "/opt/nomad"
log_level = "DEBUG"
enable_debug = true
bind_addr = "0.0.0.0"
disable_update_check = true


@ -1,49 +0,0 @@
{
"variables": {
"bin_url": "{{ env `NOMAD_URL` }}"
},
"builders": [
{
"type": "digitalocean",
"image": "ubuntu-12-04-x64",
"region": "nyc3",
"size": "512mb",
"snapshot_name": "nomad-demo-{{timestamp}}"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys 58118E89F3A912897C070ADBF76221572C52609D",
"echo 'deb https://apt.dockerproject.org/repo ubuntu-precise main' > /etc/apt/sources.list.d/docker.list",
"sudo apt-get -y update",
"sudo apt-get -y install unzip docker-engine curl",
"curl -o /tmp/nomad.zip -L {{ user `bin_url` }}",
"sudo unzip -d /usr/local/bin /tmp/nomad.zip",
"mkdir -p /usr/local/etc/nomad"
]
},
{
"type": "file",
"source": "upstart.nomad",
"destination": "/etc/init/nomad.conf"
},
{
"type": "file",
"source": "default.hcl",
"destination": "/usr/local/etc/nomad/nomad.hcl"
}
],
"post-processors": [
{
"type": "atlas",
"artifact": "hashicorp/nomad-demo",
"artifact_type": "digitalocean.image"
}
],
"push": {
"name": "hashicorp/nomad-demo",
"vcs": true
}
}


@ -1,12 +0,0 @@
description "Nomad by HashiCorp"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
CONFIG_DIR=/usr/local/etc/nomad
mkdir -p $CONFIG_DIR
exec /usr/local/bin/nomad agent -config $CONFIG_DIR >> /var/log/nomad.log 2>&1
end script


@ -1,2 +0,0 @@
[statsite]
stream_cmd = cat >> /opt/statsite.out


@ -1,50 +0,0 @@
{
"variables": {
"bin_url": "{{ env `STATSITE_URL` }}"
},
"builders": [
{
"type": "digitalocean",
"image": "ubuntu-12-04-x64",
"region": "nyc3",
"size": "512mb",
"snapshot_name": "nomad-demo-statsite-{{timestamp}}"
}
],
"provisioners": [
{
"type": "shell",
"inline": [
"sudo apt-get -y update",
"sudo apt-get -y install unzip build-essential scons",
"curl -o /tmp/statsite.zip -L {{ user `bin_url` }}",
"mkdir -p /tmp/statsite",
"unzip -d /tmp/statsite /tmp/statsite.zip",
"cd /tmp/statsite/* && make",
"mv /tmp/statsite/*/statsite /usr/local/bin",
"rm -rf /tmp/statsite"
]
},
{
"type": "file",
"source": "upstart.statsite",
"destination": "/etc/init/statsite.conf"
},
{
"type": "file",
"source": "default.conf",
"destination": "/usr/local/etc/statsite.conf"
}
],
"post-processors": [
{
"type": "atlas",
"artifact": "hashicorp/nomad-demo-statsite",
"artifact_type": "digitalocean.image"
}
],
"push": {
"name": "hashicorp/nomad-demo-statsite",
"vcs": true
}
}


@ -1,10 +0,0 @@
description "Statsite"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
script
exec /usr/local/bin/statsite -f /usr/local/etc/statsite.conf >> /var/log/statsite.log 2>&1
end script


@ -1,6 +0,0 @@
datacenter = "${datacenter}"
client {
enabled = true
servers = [${join(",", formatlist("\"%s:4647\"", servers))}]
node_class = "linux-64bit"
}


@ -1,35 +0,0 @@
variable "count" {}
variable "image" {}
variable "region" {}
variable "size" { default = "1gb" }
variable "servers" {}
variable "ssh_keys" {}
resource "template_file" "client_config" {
filename = "${path.module}/client.hcl.tpl"
vars {
datacenter = "${var.region}"
servers = "${split(",", var.servers)}"
}
}
resource "digitalocean_droplet" "client" {
image = "${var.image}"
name = "nomad-client-${var.region}-${count.index}"
count = "${var.count}"
size = "${var.size}"
region = "${var.region}"
ssh_keys = ["${split(",", var.ssh_keys)}"]
provisioner "remote-exec" {
inline = <<CMD
cat > /usr/local/etc/nomad/client.hcl <<EOF
${template_file.client_config.rendered}
EOF
CMD
}
provisioner "remote-exec" {
inline = "sudo start nomad || sudo restart nomad"
}
}


@ -1,65 +0,0 @@
variable "ssh_keys" {}
resource "atlas_artifact" "nomad-digitalocean" {
name = "hashicorp/nomad-demo"
type = "digitalocean.image"
version = "latest"
}
module "statsite" {
source = "./statsite"
region = "nyc3"
ssh_keys = "${var.ssh_keys}"
}
module "servers" {
source = "./server"
region = "nyc3"
image = "${atlas_artifact.nomad-digitalocean.id}"
ssh_keys = "${var.ssh_keys}"
statsite = "${module.statsite.addr}"
}
module "clients-nyc3" {
source = "./client"
region = "nyc3"
count = 500
image = "${atlas_artifact.nomad-digitalocean.id}"
servers = "${module.servers.addrs}"
ssh_keys = "${var.ssh_keys}"
}
module "clients-ams2" {
source = "./client"
region = "ams2"
count = 500
image = "${atlas_artifact.nomad-digitalocean.id}"
servers = "${module.servers.addrs}"
ssh_keys = "${var.ssh_keys}"
}
module "clients-ams3" {
source = "./client"
region = "ams3"
count = 500
image = "${atlas_artifact.nomad-digitalocean.id}"
servers = "${module.servers.addrs}"
ssh_keys = "${var.ssh_keys}"
}
module "clients-sfo1" {
source = "./client"
region = "sfo1"
count = 500
image = "${atlas_artifact.nomad-digitalocean.id}"
servers = "${module.servers.addrs}"
ssh_keys = "${var.ssh_keys}"
}
output "Nomad Servers" {
value = "${join(" ", split(",", module.servers.addrs))}"
}
output "Statsite Server" {
value = "${module.statsite.addr}"
}


@ -1,54 +0,0 @@
variable "image" {}
variable "region" {}
variable "size" { default = "8gb" }
variable "ssh_keys" {}
variable "statsite" {}
resource "digitalocean_droplet" "server" {
image = "${var.image}"
name = "nomad-server-${var.region}-${count.index}"
count = 3
size = "${var.size}"
region = "${var.region}"
ssh_keys = ["${split(",", var.ssh_keys)}"]
provisioner "remote-exec" {
inline = <<CMD
cat > /usr/local/etc/nomad/server.hcl <<EOF
datacenter = "${var.region}"
server {
enabled = true
bootstrap_expect = 3
}
advertise {
rpc = "${self.ipv4_address}:4647"
serf = "${self.ipv4_address}:4648"
}
telemetry {
statsite_address = "${var.statsite}"
disable_hostname = true
}
EOF
CMD
}
provisioner "remote-exec" {
inline = "sudo start nomad || sudo restart nomad"
}
}
resource "null_resource" "server_join" {
provisioner "local-exec" {
command = <<CMD
join() {
curl -X PUT ${digitalocean_droplet.server.0.ipv4_address}:4646/v1/agent/join?address=$1
}
join ${digitalocean_droplet.server.1.ipv4_address}
join ${digitalocean_droplet.server.2.ipv4_address}
CMD
}
}
output "addrs" {
value = "${join(",", digitalocean_droplet.server.*.ipv4_address)}"
}


@ -1,26 +0,0 @@
variable "size" { default = "1gb" }
variable "region" {}
variable "ssh_keys" {}
resource "atlas_artifact" "statsite-digitalocean" {
name = "hashicorp/nomad-demo-statsite"
type = "digitalocean.image"
version = "latest"
}
resource "digitalocean_droplet" "statsite" {
image = "${atlas_artifact.statsite-digitalocean.id}"
name = "nomad-statsite-${var.region}-${count.index}"
count = 1
size = "${var.size}"
region = "${var.region}"
ssh_keys = ["${split(",", var.ssh_keys)}"]
provisioner "remote-exec" {
inline = "sudo start statsite || true"
}
}
output "addr" {
value = "${digitalocean_droplet.statsite.ipv4_address}:8125"
}


@ -1,4 +0,0 @@
# This is a comma-separated list of SSH key ID's or fingerprints
# available in your DigitalOcean account. These keys will be granted
# SSH access to all of the deployed instances.
ssh_keys = "7b:40:be:5a:9a:90:1f:8a:b6:ec:7e:48:82:ae:73:dc"


@ -1,24 +0,0 @@
# Vagrant Nomad Demo
This Vagrantfile and associated Nomad configuration files are meant
to be used along with the
[getting started guide](https://nomadproject.io/intro/getting-started/install.html).
Follow along with the guide, or just start the Vagrant box with:
$ vagrant up
Once it is finished, you should be able to SSH in and interact with Nomad:
$ vagrant ssh
...
$ nomad
usage: nomad [--version] [--help] <command> [<args>]
Available commands are:
agent Runs a Nomad agent
agent-info Display status information about the local agent
...
To learn more about starting Nomad see the [official site](https://nomadproject.io).


@ -1,46 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
$script = <<SCRIPT
# Update apt and get dependencies
sudo apt-get update
sudo apt-get install -y unzip curl wget vim
# Download Nomad
echo Fetching Nomad...
cd /tmp/
curl -sSL https://releases.hashicorp.com/nomad/0.4.0/nomad_0.4.0_linux_amd64.zip -o nomad.zip
echo Installing Nomad...
unzip nomad.zip
sudo chmod +x nomad
sudo mv nomad /usr/bin/nomad
sudo mkdir -p /etc/nomad.d
sudo chmod a+w /etc/nomad.d
SCRIPT
Vagrant.configure(2) do |config|
config.vm.box = "puphpet/ubuntu1404-x64"
config.vm.hostname = "nomad"
config.vm.provision "shell", inline: $script, privileged: false
config.vm.provision "docker" # Just install it
# Increase memory for Parallels Desktop
config.vm.provider "parallels" do |p, o|
p.memory = "1024"
end
# Increase memory for Virtualbox
config.vm.provider "virtualbox" do |vb|
vb.memory = "1024"
end
# Increase memory for VMware
["vmware_fusion", "vmware_workstation"].each do |p|
config.vm.provider p do |v|
v.vmx["memsize"] = "1024"
end
end
end


@ -1,31 +0,0 @@
# Increase log verbosity
log_level = "DEBUG"
# Setup data dir
data_dir = "/tmp/client1"
enable_debug = true
name = "client1"
# Enable the client
client {
enabled = true
# For demo assume we are talking to server1. For production,
# this should be like "nomad.service.consul:4647" and a system
# like Consul used for service discovery.
servers = ["127.0.0.1:4647"]
node_class = "foo"
options {
"driver.raw_exec.enable" = "1"
}
reserved {
cpu = 500
}
}
# Modify our port to avoid a collision with server1
ports {
http = 5656
}


@ -1,30 +0,0 @@
# Increase log verbosity
log_level = "DEBUG"
# Setup data dir
data_dir = "/tmp/client1"
enable_debug = true
name = "client1"
# Enable the client
client {
enabled = true
# For demo assume we are talking to server1. For production,
# this should be like "nomad.service.consul:4647" and a system
# like Consul used for service discovery.
node_class = "foo"
options {
"driver.raw_exec.enable" = "1"
}
reserved {
cpu = 500
}
}
# Modify our port to avoid a collision with server1
ports {
http = 5656
}


@ -1,25 +0,0 @@
# Increase log verbosity
log_level = "DEBUG"
# Setup data dir
data_dir = "/tmp/client2"
# Enable the client
client {
enabled = true
# For demo assume we are talking to server1. For production,
# this should be like "nomad.service.consul:4647" and a system
# like Consul used for service discovery.
servers = ["127.0.0.1:4647"]
# Tag this node as having an SSD via node metadata
meta {
ssd = "true"
}
}
# Modify our port to avoid a collision with server1 and client1
ports {
http = 5657
}


@ -1,13 +0,0 @@
# Increase log verbosity
log_level = "DEBUG"
# Setup data dir
data_dir = "/tmp/server1"
# Enable the server
server {
enabled = true
# Self-elect, should be 3 or 5 for production
bootstrap_expect = 1
}


@ -1,30 +0,0 @@
# Dist
The `dist` folder contains sample configs for various platforms.
## Conventions
On Unix-like systems, place agent configs under `/etc/nomad` and store data under `/var/lib/nomad/`. You will need to create both of these directories. We assume that `nomad` is installed to `/usr/bin/nomad`.
## Agent Configs
The following example configuration files are provided:
- `server.hcl`
- `client.hcl`
Place one of these under `/etc/nomad` depending on the node's role. You should use `server.hcl` to configure a node as a server (which is responsible for scheduling) or `client.hcl` to configure a node as a client (which is responsible for running workloads).
Read <https://nomadproject.io/docs/agent/config.html> to learn which options are available and how to configure them.
## Upstart
On systems using Upstart, the basic Upstart job under `upstart/nomad.conf` starts and stops the Nomad agent. Place it under `/etc/init/nomad.conf`.
You can control Nomad with `start|stop|restart nomad`.
## Systemd
On systems using systemd, the basic unit file under `systemd/nomad.service` starts and stops the Nomad agent. Place it under `/etc/systemd/system/nomad.service`.
You can control Nomad with `systemctl start|stop|restart nomad`.


@ -1,7 +0,0 @@
bind_addr = "127.0.0.1"
data_dir = "/var/lib/nomad/"
client {
enabled = true
servers = ["10.1.0.1", "10.1.0.2", "10.1.0.3"]
}


@ -1,13 +0,0 @@
bind_addr = "0.0.0.0"
data_dir = "/var/lib/nomad"
advertise {
# This should be the IP of THIS MACHINE and must be routable by every node
# in your cluster
rpc = "1.2.3.4:4647"
}
server {
enabled = true
bootstrap_expect = 3
}


@ -1,11 +0,0 @@
[Unit]
Description=Nomad
Documentation=https://nomadproject.io/docs/
[Service]
ExecStart=/usr/bin/nomad agent -config /etc/nomad
ExecReload=/bin/kill -HUP $MAINPID
LimitNOFILE=65536
[Install]
WantedBy=multi-user.target


@ -1,4 +0,0 @@
start on (filesystem and net-device-up IFACE=lo)
stop on runlevel [!2345]
exec /usr/bin/nomad agent -config /etc/nomad


@ -1,60 +0,0 @@
package discover
import (
"fmt"
"os"
"os/exec"
"path/filepath"
"runtime"
"github.com/kardianos/osext"
)
// NomadExecutable returns the path to the nomad binary. It checks the current
// executable, then the $PATH, the $GOPATH/bin directory, the current working
// directory, and finally its bin/ subdirectory, in that order. If the binary
// can't be found, an error is returned.
func NomadExecutable() (string, error) {
nomadExe := "nomad"
if runtime.GOOS == "windows" {
nomadExe = "nomad.exe"
}
// Check the current executable.
bin, err := osext.Executable()
if err != nil {
return "", fmt.Errorf("Failed to determine the nomad executable: %v", err)
}
if filepath.Base(bin) == nomadExe {
return bin, nil
}
// Check the $PATH
if bin, err := exec.LookPath(nomadExe); err == nil {
return bin, nil
}
// Check the $GOPATH.
bin = filepath.Join(os.Getenv("GOPATH"), "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check the CWD.
pwd, err := os.Getwd()
if err != nil {
return "", fmt.Errorf("Could not find Nomad executable (%v): %v", nomadExe, err)
}
bin = filepath.Join(pwd, nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
// Check CWD/bin
bin = filepath.Join(pwd, "bin", nomadExe)
if _, err := os.Stat(bin); err == nil {
return bin, nil
}
return "", fmt.Errorf("Could not find Nomad executable (%v)", nomadExe)
}
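A minimal sketch of using the helper above to locate and shell out to the nomad binary; the import path is an assumption, only the package name discover comes from the file itself:
package main
import (
	"fmt"
	"os/exec"
	// Import path assumed for illustration; the package is named "discover".
	"github.com/hashicorp/nomad/helper/discover"
)
func main() {
	bin, err := discover.NomadExecutable()
	if err != nil {
		fmt.Println("could not locate nomad:", err)
		return
	}
	// Shell out to the discovered binary, e.g. to print its version.
	out, err := exec.Command(bin, "version").CombinedOutput()
	if err != nil {
		fmt.Println("nomad version failed:", err)
		return
	}
	fmt.Printf("%s", out)
}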


@ -1,169 +0,0 @@
package fields
import (
"fmt"
"github.com/hashicorp/go-multierror"
"github.com/mitchellh/mapstructure"
)
// FieldData contains the raw data and the schema that the data should adhere to
type FieldData struct {
Raw map[string]interface{}
Schema map[string]*FieldSchema
}
// Validate cycles through the raw data and checks that each value can be
// converted to the type declared in the schema. It also checks that required
// fields are present and have a non-empty value.
func (d *FieldData) Validate() error {
var result *multierror.Error
// Scan for missing required fields
for field, schema := range d.Schema {
if schema.Required {
_, ok := d.Raw[field]
if !ok {
result = multierror.Append(result, fmt.Errorf(
"field %q is required", field))
}
}
}
// Validate field type and value
for field, value := range d.Raw {
schema, ok := d.Schema[field]
if !ok {
result = multierror.Append(result, fmt.Errorf(
"%q is an invalid field", field))
continue
}
switch schema.Type {
case TypeBool, TypeInt, TypeMap, TypeArray, TypeString:
val, _, err := d.getPrimitive(field, schema)
if err != nil {
result = multierror.Append(result, fmt.Errorf(
"field %q with input %q doesn't seem to be of type %s",
field, value, schema.Type))
}
// Check that we don't have an empty value for required fields
if schema.Required && val == schema.Type.Zero() {
result = multierror.Append(result, fmt.Errorf(
"field %q is required, but no value was found", field))
}
default:
result = multierror.Append(result, fmt.Errorf(
"unknown field type %s for field %s", schema.Type, field))
}
}
return result.ErrorOrNil()
}
// Get gets the value for the given field. If the key is an invalid field,
// FieldData will panic. If you want a safer version of this method, use
// GetOk. If the field k is not set, the default value (if set) will be
// returned, otherwise the zero value will be returned.
func (d *FieldData) Get(k string) interface{} {
schema, ok := d.Schema[k]
if !ok {
panic(fmt.Sprintf("field %s not in the schema", k))
}
value, ok := d.GetOk(k)
if !ok {
value = schema.DefaultOrZero()
}
return value
}
// GetOk gets the value for the given field. The second return value
// will be false if the key is invalid or the key is not set at all.
func (d *FieldData) GetOk(k string) (interface{}, bool) {
schema, ok := d.Schema[k]
if !ok {
return nil, false
}
result, ok, err := d.GetOkErr(k)
if err != nil {
panic(fmt.Sprintf("error reading %s: %s", k, err))
}
if ok && result == nil {
result = schema.DefaultOrZero()
}
return result, ok
}
// GetOkErr is the most conservative of all the Get methods. It returns
// whether key is set or not, but also an error value. The error value is
// non-nil if the field doesn't exist or there was an error parsing the
// field value.
func (d *FieldData) GetOkErr(k string) (interface{}, bool, error) {
schema, ok := d.Schema[k]
if !ok {
return nil, false, fmt.Errorf("unknown field: %s", k)
}
switch schema.Type {
case TypeBool, TypeInt, TypeMap, TypeArray, TypeString:
return d.getPrimitive(k, schema)
default:
return nil, false,
fmt.Errorf("unknown field type %s for field %s", schema.Type, k)
}
}
// getPrimitive tries to convert the raw value of a field to its data type as
// defined in the schema. It does strict type checking, so the value will need
// to be able to convert to the appropriate type directly.
func (d *FieldData) getPrimitive(
k string, schema *FieldSchema) (interface{}, bool, error) {
raw, ok := d.Raw[k]
if !ok {
return nil, false, nil
}
switch schema.Type {
case TypeBool:
var result bool
if err := mapstructure.Decode(raw, &result); err != nil {
return nil, true, err
}
return result, true, nil
case TypeInt:
var result int
if err := mapstructure.Decode(raw, &result); err != nil {
return nil, true, err
}
return result, true, nil
case TypeString:
var result string
if err := mapstructure.Decode(raw, &result); err != nil {
return nil, true, err
}
return result, true, nil
case TypeMap:
var result map[string]interface{}
if err := mapstructure.Decode(raw, &result); err != nil {
return nil, true, err
}
return result, true, nil
case TypeArray:
var result []interface{}
if err := mapstructure.Decode(raw, &result); err != nil {
return nil, true, err
}
return result, true, nil
default:
panic(fmt.Sprintf("Unknown type: %s", schema.Type))
}
}


@ -1,19 +0,0 @@
package fields
// FieldSchema is a basic schema to describe the format of a configuration field
type FieldSchema struct {
Type FieldType
Default interface{}
Description string
Required bool
}
// DefaultOrZero returns the default value if it is set, or otherwise
// the zero value of the type.
func (s *FieldSchema) DefaultOrZero() interface{} {
if s.Default != nil {
return s.Default
}
return s.Type.Zero()
}


@ -1,47 +0,0 @@
package fields
// FieldType is the enum of types that a field can be.
type FieldType uint
const (
TypeInvalid FieldType = 0
TypeString FieldType = iota
TypeInt
TypeBool
TypeMap
TypeArray
)
func (t FieldType) String() string {
switch t {
case TypeString:
return "string"
case TypeInt:
return "integer"
case TypeBool:
return "boolean"
case TypeMap:
return "map"
case TypeArray:
return "array"
default:
return "unknown type"
}
}
func (t FieldType) Zero() interface{} {
switch t {
case TypeString:
return ""
case TypeInt:
return 0
case TypeBool:
return false
case TypeMap:
return map[string]interface{}{}
case TypeArray:
return []interface{}{}
default:
panic("unknown type: " + t.String())
}
}
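A minimal sketch of how the fields package above is meant to be used: declare a schema, wrap raw configuration in a FieldData, then Validate and read values. The field names ("image", "port") and the import path are illustrative assumptions:
package main
import (
	"fmt"
	// Import path assumed for illustration; the package is named "fields".
	"github.com/hashicorp/nomad/helper/fields"
)
func main() {
	data := &fields.FieldData{
		Raw: map[string]interface{}{
			"image": "redis",
			"port":  6379,
		},
		Schema: map[string]*fields.FieldSchema{
			"image": {Type: fields.TypeString, Required: true},
			"port":  {Type: fields.TypeInt, Default: 8080},
		},
	}
	if err := data.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	// Get falls back to the schema default (or zero value) for unset keys.
	fmt.Println(data.Get("image"), data.Get("port"))
}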


@ -1,16 +0,0 @@
package sliceflag
import "strings"
// StringFlag implements the flag.Value interface; specifying the flag multiple
// times appends each value to the underlying list.
type StringFlag []string
func (s *StringFlag) String() string {
return strings.Join(*s, ",")
}
func (s *StringFlag) Set(value string) error {
*s = append(*s, value)
return nil
}
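A minimal sketch of wiring StringFlag into the standard flag package so a flag can be repeated on the command line; the import path and flag name are illustrative assumptions, the package name sliceflag comes from the file above:
package main
import (
	"flag"
	"fmt"
	// Import path assumed for illustration.
	sliceflag "github.com/hashicorp/nomad/helper/flag-slice"
)
func main() {
	var meta sliceflag.StringFlag
	fs := flag.NewFlagSet("example", flag.ExitOnError)
	fs.Var(&meta, "meta", "metadata entry; may be repeated")
	// Each -meta occurrence appends to the slice.
	fs.Parse([]string{"-meta", "region=us", "-meta", "tier=cache"})
	fmt.Println([]string(meta)) // [region=us tier=cache]
}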


@ -1,43 +0,0 @@
package gatedwriter
import (
"io"
"sync"
)
// Writer is an io.Writer implementation that buffers all of its
// data into an internal buffer until it is told to let data through.
type Writer struct {
Writer io.Writer
buf [][]byte
flush bool
lock sync.RWMutex
}
// Flush tells the Writer to flush any buffered data and to stop
// buffering.
func (w *Writer) Flush() {
w.lock.Lock()
w.flush = true
w.lock.Unlock()
for _, p := range w.buf {
w.Write(p)
}
w.buf = nil
}
func (w *Writer) Write(p []byte) (n int, err error) {
w.lock.RLock()
defer w.lock.RUnlock()
if w.flush {
return w.Writer.Write(p)
}
p2 := make([]byte, len(p))
copy(p2, p)
w.buf = append(w.buf, p2)
return len(p), nil
}


@ -1,118 +0,0 @@
// Package testtask implements a portable set of commands useful as stand-ins
// for user tasks.
package testtask
import (
"fmt"
"io/ioutil"
"os"
"os/exec"
"time"
"github.com/hashicorp/nomad/client/driver/env"
"github.com/hashicorp/nomad/nomad/structs"
"github.com/kardianos/osext"
)
// Path returns the path to the currently running executable.
func Path() string {
path, err := osext.Executable()
if err != nil {
panic(err)
}
return path
}
// SetEnv configures the environment of the task so that Run executes a testtask
// script when called from within the task.
func SetEnv(env *env.TaskEnvironment) {
env.AppendEnvvars(map[string]string{"TEST_TASK": "execute"})
}
// SetCmdEnv configures the environment of cmd so that Run executes a testtask
// script when called from within cmd.
func SetCmdEnv(cmd *exec.Cmd) {
cmd.Env = append(os.Environ(), "TEST_TASK=execute")
}
// SetTaskEnv configures the environment of t so that Run executes a testtask
// script when called from within t.
func SetTaskEnv(t *structs.Task) {
if t.Env == nil {
t.Env = map[string]string{}
}
t.Env["TEST_TASK"] = "execute"
}
// Run interprets os.Args as a testtask script if the current program was
// launched with an environment configured by SetCmdEnv or SetTaskEnv. It
// returns false if the environment was not set by this package.
func Run() bool {
switch tm := os.Getenv("TEST_TASK"); tm {
case "":
return false
case "execute":
execute()
return true
default:
fmt.Fprintf(os.Stderr, "unexpected value for TEST_TASK, \"%s\"\n", tm)
os.Exit(1)
return true
}
}
func execute() {
if len(os.Args) < 2 {
fmt.Fprintln(os.Stderr, "no command provided")
os.Exit(1)
}
args := os.Args[1:]
// popArg removes the first argument from args and returns it.
popArg := func() string {
s := args[0]
args = args[1:]
return s
}
// execute a sequence of operations from args
for len(args) > 0 {
switch cmd := popArg(); cmd {
case "sleep":
// sleep <dur>: sleep for a duration indicated by the first
// argument
if len(args) < 1 {
fmt.Fprintln(os.Stderr, "expected arg for sleep")
os.Exit(1)
}
dur, err := time.ParseDuration(popArg())
if err != nil {
fmt.Fprintf(os.Stderr, "could not parse sleep time: %v", err)
os.Exit(1)
}
time.Sleep(dur)
case "echo":
// echo <msg>: write the msg followed by a newline to stdout.
fmt.Println(popArg())
case "write":
// write <msg> <file>: write a message to a file. The first
// argument is the msg. The second argument is the path to the
// target file.
if len(args) < 2 {
fmt.Fprintln(os.Stderr, "expected two args for write")
os.Exit(1)
}
msg := popArg()
file := popArg()
ioutil.WriteFile(file, []byte(msg), 0666)
default:
fmt.Fprintln(os.Stderr, "unknown command:", cmd)
os.Exit(1)
}
}
}
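A minimal sketch of the re-exec pattern the testtask package supports, as it might appear in a driver test; the import path, package name, test names, and expected output are illustrative assumptions:
package mydriver_test // illustrative package name
import (
	"os"
	"os/exec"
	"testing"
	// Import path assumed for illustration; the package is named "testtask".
	"github.com/hashicorp/nomad/helper/testtask"
)
// TestMain lets the re-executed test binary interpret its arguments as a
// testtask script instead of running the test suite again.
func TestMain(m *testing.M) {
	if !testtask.Run() {
		os.Exit(m.Run())
	}
}
func TestEchoTask(t *testing.T) {
	cmd := exec.Command(testtask.Path(), "sleep", "10ms", "echo", "hello")
	testtask.SetCmdEnv(cmd)
	out, err := cmd.CombinedOutput()
	if err != nil {
		t.Fatalf("task failed: %v", err)
	}
	if string(out) != "hello\n" {
		t.Fatalf("unexpected output: %q", out)
	}
}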


@ -100,6 +100,7 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
delete(m, "meta")
delete(m, "update")
delete(m, "periodic")
delete(m, "vault")
// Set the ID and name to the object key
result.ID = obj.Keys[0].Token.Value().(string)
@ -138,6 +139,8 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
"meta",
"task",
"group",
"vault",
"vault_token",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "job:")
@ -188,9 +191,10 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
result.TaskGroups = make([]*structs.TaskGroup, len(tasks), len(tasks)*2)
for i, t := range tasks {
result.TaskGroups[i] = &structs.TaskGroup{
Name: t.Name,
Count: 1,
Tasks: []*structs.Task{t},
Name: t.Name,
Count: 1,
EphemeralDisk: structs.DefaultEphemeralDisk(),
Tasks: []*structs.Task{t},
}
}
}
@ -202,6 +206,23 @@ func parseJob(result *structs.Job, list *ast.ObjectList) error {
}
}
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
jobVault := structs.DefaultVaultBlock()
if err := parseVault(jobVault, o); err != nil {
return multierror.Prefix(err, "vault ->")
}
// Go through the task groups/tasks and if they don't have a Vault block, set it
for _, tg := range result.TaskGroups {
for _, task := range tg.Tasks {
if task.Vault == nil {
task.Vault = jobVault
}
}
}
}
return nil
}
@ -238,6 +259,8 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
"restart",
"meta",
"task",
"ephemeral_disk",
"vault",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
@ -251,6 +274,8 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
delete(m, "meta")
delete(m, "task")
delete(m, "restart")
delete(m, "ephemeral_disk")
delete(m, "vault")
// Default count to 1 if not specified
if _, ok := m["count"]; !ok {
@ -278,6 +303,14 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
}
}
// Parse ephemeral disk
g.EphemeralDisk = structs.DefaultEphemeralDisk()
if o := listVal.Filter("ephemeral_disk"); len(o.Items) > 0 {
if err := parseEphemeralDisk(&g.EphemeralDisk, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', ephemeral_disk ->", n))
}
}
// Parse out meta fields. These are in HCL as a list so we need
// to iterate over them and merge them.
if metaO := listVal.Filter("meta"); len(metaO.Items) > 0 {
@ -299,6 +332,21 @@ func parseGroups(result *structs.Job, list *ast.ObjectList) error {
}
}
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
tgVault := structs.DefaultVaultBlock()
if err := parseVault(tgVault, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
}
// Go through the tasks and if they don't have a Vault block, set it
for _, task := range g.Tasks {
if task.Vault == nil {
task.Vault = tgVault
}
}
}
collection = append(collection, &g)
}
@ -358,6 +406,7 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error
"version",
"regexp",
"distinct_hosts",
"set_contains",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return err
@ -386,6 +435,13 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error
m["RTarget"] = constraint
}
// If "set_contains" is provided, set the operand
// to "set_contains" and the value to the "RTarget"
if constraint, ok := m[structs.ConstraintSetContains]; ok {
m["Operand"] = structs.ConstraintSetContains
m["RTarget"] = constraint
}
if value, ok := m[structs.ConstraintDistinctHosts]; ok {
enabled, err := parseBool(value)
if err != nil {
@ -415,6 +471,39 @@ func parseConstraints(result *[]*structs.Constraint, list *ast.ObjectList) error
return nil
}
func parseEphemeralDisk(result **structs.EphemeralDisk, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) > 1 {
return fmt.Errorf("only one 'ephemeral_disk' block allowed")
}
// Get our ephemeral_disk object
obj := list.Items[0]
// Check for invalid keys
valid := []string{
"sticky",
"size",
"migrate",
}
if err := checkHCLKeys(obj.Val, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, obj.Val); err != nil {
return err
}
var ephemeralDisk structs.EphemeralDisk
if err := mapstructure.WeakDecode(m, &ephemeralDisk); err != nil {
return err
}
*result = &ephemeralDisk
return nil
}
// parseBool takes an interface value and tries to convert it to a boolean and
// returns an error if the type can't be converted.
func parseBool(value interface{}) (bool, error) {
@ -459,17 +548,19 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
// Check for invalid keys
valid := []string{
"driver",
"user",
"env",
"service",
"artifact",
"config",
"constraint",
"driver",
"env",
"kill_timeout",
"logs",
"meta",
"resources",
"logs",
"kill_timeout",
"artifact",
"service",
"template",
"user",
"vault",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s' ->", n))
@ -479,14 +570,16 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
if err := hcl.DecodeObject(&m, item.Val); err != nil {
return err
}
delete(m, "artifact")
delete(m, "config")
delete(m, "env")
delete(m, "constraint")
delete(m, "service")
delete(m, "env")
delete(m, "logs")
delete(m, "meta")
delete(m, "resources")
delete(m, "logs")
delete(m, "artifact")
delete(m, "service")
delete(m, "template")
delete(m, "vault")
// Build the task
var t structs.Task
@ -606,6 +699,23 @@ func parseTasks(jobName string, taskGroupName string, result *[]*structs.Task, l
}
}
// Parse templates
if o := listVal.Filter("template"); len(o.Items) > 0 {
if err := parseTemplates(&t.Templates, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', template ->", n))
}
}
// If we have a vault block, then parse that
if o := listVal.Filter("vault"); len(o.Items) > 0 {
v := structs.DefaultVaultBlock()
if err := parseVault(v, o); err != nil {
return multierror.Prefix(err, fmt.Sprintf("'%s', vault ->", n))
}
t.Vault = v
}
*result = append(*result, &t)
}
@ -683,6 +793,46 @@ func parseArtifactOption(result map[string]string, list *ast.ObjectList) error {
return nil
}
func parseTemplates(result *[]*structs.Template, list *ast.ObjectList) error {
for _, o := range list.Elem().Items {
// Check for invalid keys
valid := []string{
"source",
"destination",
"data",
"change_mode",
"change_signal",
"splay",
"once",
}
if err := checkHCLKeys(o.Val, valid); err != nil {
return err
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
templ := structs.DefaultTemplate()
dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
DecodeHook: mapstructure.StringToTimeDurationHookFunc(),
WeaklyTypedInput: true,
Result: templ,
})
if err != nil {
return err
}
if err := dec.Decode(m); err != nil {
return err
}
*result = append(*result, templ)
}
return nil
}
func parseServices(jobName string, taskGroupName string, task *structs.Task, serviceObjs *ast.ObjectList) error {
task.Services = make([]*structs.Service, len(serviceObjs.Items))
var defaultServiceName bool
@ -805,8 +955,8 @@ func parseResources(result *structs.Resources, list *ast.ObjectList) error {
// Check for invalid keys
valid := []string{
"cpu",
"disk",
"iops",
"disk",
"memory",
"network",
}
@ -995,6 +1145,49 @@ func parsePeriodic(result **structs.PeriodicConfig, list *ast.ObjectList) error
return nil
}
func parseVault(result *structs.Vault, list *ast.ObjectList) error {
list = list.Elem()
if len(list.Items) == 0 {
return nil
}
if len(list.Items) > 1 {
return fmt.Errorf("only one 'vault' block allowed per task")
}
// Get our vault object
o := list.Items[0]
// We need this later
var listVal *ast.ObjectList
if ot, ok := o.Val.(*ast.ObjectType); ok {
listVal = ot.List
} else {
return fmt.Errorf("vault: should be an object")
}
// Check for invalid keys
valid := []string{
"policies",
"env",
"change_mode",
"change_signal",
}
if err := checkHCLKeys(listVal, valid); err != nil {
return multierror.Prefix(err, "vault ->")
}
var m map[string]interface{}
if err := hcl.DecodeObject(&m, o.Val); err != nil {
return err
}
if err := mapstructure.WeakDecode(m, result); err != nil {
return err
}
return nil
}
func checkHCLKeys(node ast.Node, valid []string) error {
var list *ast.ObjectList
switch n := node.(type) {


@ -1,24 +0,0 @@
job "binstore-storagelocker" {
group "binsl" {
task "binstore" {
driver = "docker"
artifact {
source = "http://foo.com/bar"
destination = ""
options {
foo = "bar"
}
}
artifact {
source = "http://foo.com/baz"
}
artifact {
source = "http://foo.com/bam"
destination = "var/foo"
}
resources {}
}
}
}


@ -1,13 +0,0 @@
job "binstore-storagelocker" {
group "binsl" {
count = 5
task "binstore" {
driver = "docker"
artifact {
bad = "bad"
}
resources {}
}
}
}

Some files were not shown because too many files have changed in this diff.