Better config test (#177)

* Better config test

  Previously, when using the config test option `-test`, we quit fairly early in the process and would not catch a variety of additional parsing errors (such as lighthouse IP addresses, local_range, and the new check that static hosts are in the certificate's subnet).

* Run the config test as part of the smoke test

* The config test does not need privileges

Co-authored-by: Nathan Brown <nate@slack-corp.com>
This commit is contained in:

parent  b4f2f7ce4e
commit  7cdbb14a18
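Condensed, the change applies one pattern throughout startup: parse and validate the entire configuration first, gate every side effect (tun device, UDP listener, stats emitters) behind the config-test flag, and exit only after everything has been checked. The sketch below illustrates that shape; the -config and -test flag names come from the smoke-test invocations in this commit, while loadConfig and createTunDevice are hypothetical stand-ins, not nebula's real functions.

package main

import (
    "flag"
    "fmt"
    "os"
)

// loadConfig stands in for the full parse/validate pass over the config file.
func loadConfig(path string) error {
    if path == "" {
        return fmt.Errorf("no config path given")
    }
    return nil
}

// createTunDevice stands in for anything that modifies the machine
// (tun device, UDP listener, background goroutines).
func createTunDevice() error {
    return nil
}

func main() {
    configPath := flag.String("config", "", "path to the config file")
    configTest := flag.Bool("test", false, "validate the config and exit")
    flag.Parse()

    // Validation always runs, so a config test exercises the same checks
    // as a real start.
    if err := loadConfig(*configPath); err != nil {
        fmt.Fprintln(os.Stderr, "config error:", err)
        os.Exit(1) // non-zero exit signals a faulty config
    }

    // Side effects are gated: skipped entirely when only testing the config.
    if !*configTest {
        if err := createTunDevice(); err != nil {
            fmt.Fprintln(os.Stderr, err)
            os.Exit(1)
        }
    }

    // The config-test exit now happens only after everything above
    // has been validated.
    if *configTest {
        os.Exit(0)
    }

    // ... a real run would continue into the main loop here ...
}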
@@ -2,6 +2,10 @@
 set -e -x
 
+docker run --name lighthouse1 --rm nebula:smoke -config lighthouse1.yml -test
+docker run --name host2 --rm nebula:smoke -config host2.yml -test
+docker run --name host3 --rm nebula:smoke -config host3.yml -test
+
 docker run --name lighthouse1 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config lighthouse1.yml &
 sleep 1
 docker run --name host2 --device /dev/net/tun:/dev/net/tun --cap-add NET_ADMIN --rm nebula:smoke -config host2.yml &
 
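Note that the three new -test runs omit --device /dev/net/tun and --cap-add NET_ADMIN: a config test only parses and validates the configuration, so the container needs no tun device or extra privileges (the "config test does not need privileges" part of this change).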
main.go (72 lines changed)
@@ -101,32 +101,35 @@ func Main(configPath string, configTest bool, buildVersion string) {
     // tun config, listeners, anything modifying the computer should be below
     ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 
-    if configTest {
-        os.Exit(0)
-    }
-
-    config.CatchHUP()
-
-    // set up our tun dev
-    tun, err := newTun(
-        config.GetString("tun.dev", ""),
-        tunCidr,
-        config.GetInt("tun.mtu", DEFAULT_MTU),
-        routes,
-        unsafeRoutes,
-        config.GetInt("tun.tx_queue", 500),
-    )
-    if err != nil {
-        l.WithError(err).Fatal("Failed to get a tun/tap device")
+    var tun *Tun
+    if !configTest {
+        config.CatchHUP()
+
+        // set up our tun dev
+        tun, err = newTun(
+            config.GetString("tun.dev", ""),
+            tunCidr,
+            config.GetInt("tun.mtu", DEFAULT_MTU),
+            routes,
+            unsafeRoutes,
+            config.GetInt("tun.tx_queue", 500),
+        )
+        if err != nil {
+            l.WithError(err).Fatal("Failed to get a tun/tap device")
+        }
     }
 
     // set up our UDP listener
     udpQueues := config.GetInt("listen.routines", 1)
-    udpServer, err := NewListener(config.GetString("listen.host", "0.0.0.0"), config.GetInt("listen.port", 0), udpQueues > 1)
-    if err != nil {
-        l.WithError(err).Fatal("Failed to open udp listener")
+    var udpServer *udpConn
+
+    if !configTest {
+        udpServer, err = NewListener(config.GetString("listen.host", "0.0.0.0"), config.GetInt("listen.port", 0), udpQueues > 1)
+        if err != nil {
+            l.WithError(err).Fatal("Failed to open udp listener")
+        }
+        udpServer.reloadConfig(config)
     }
-    udpServer.reloadConfig(config)
 
     // Set up my internal host map
     var preferredRanges []*net.IPNet
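With this arrangement the tun device and the UDP listener are never created during a config test, which is why tun and udpServer are now declared up front (var tun *Tun, var udpServer *udpConn) and simply stay nil when configTest is set.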
@@ -178,14 +181,14 @@ func Main(configPath string, configTest bool, buildVersion string) {
     */
 
     punchy := NewPunchyFromConfig(config)
-    if punchy.Punch {
+    if punchy.Punch && !configTest {
         l.Info("UDP hole punching enabled")
         go hostMap.Punchy(udpServer)
     }
 
     port := config.GetInt("listen.port", 0)
     // If port is dynamic, discover it
-    if port == 0 {
+    if port == 0 && !configTest {
         uPort, err := udpServer.LocalAddr()
         if err != nil {
             l.WithError(err).Fatal("Failed to get listening port")
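Hole punching and dynamic-port discovery both operate on the UDP socket (hostMap.Punchy(udpServer), udpServer.LocalAddr()), so they are likewise skipped in config-test mode, where no listener exists.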
@@ -306,21 +309,28 @@ func Main(configPath string, configTest bool, buildVersion string) {
         l.Fatalf("Unknown cipher: %v", ifConfig.Cipher)
     }
 
-    ifce, err := NewInterface(ifConfig)
-    if err != nil {
-        l.WithError(err).Fatal("Failed to initialize interface")
+    var ifce *Interface
+    if !configTest {
+        ifce, err = NewInterface(ifConfig)
+        if err != nil {
+            l.WithError(err).Fatal("Failed to initialize interface")
+        }
+
+        ifce.RegisterConfigChangeCallbacks(config)
+
+        go handshakeManager.Run(ifce)
+        go lightHouse.LhUpdateWorker(ifce)
     }
 
-    ifce.RegisterConfigChangeCallbacks(config)
-
-    go handshakeManager.Run(ifce)
-    go lightHouse.LhUpdateWorker(ifce)
-
-    err = startStats(config)
+    err = startStats(config, configTest)
     if err != nil {
         l.WithError(err).Fatal("Failed to start stats emitter")
     }
 
+    if configTest {
+        os.Exit(0)
+    }
+
     //TODO: check if we _should_ be emitting stats
     go ifce.emitStats(config.GetDuration("stats.interval", time.Second*10))
 
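The os.Exit(0) that used to sit near the top of Main now happens only after the interface configuration and the stats setup have been validated, which is what lets -test catch the lighthouse IP, local_range, and static-host-in-subnet errors mentioned in the commit message.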
stats.go (26 lines changed)
@@ -14,7 +14,7 @@ import (
     "time"
 )
 
-func startStats(c *Config) error {
+func startStats(c *Config, configTest bool) error {
     mType := c.GetString("stats.type", "")
     if mType == "" || mType == "none" {
         return nil
@@ -27,9 +27,9 @@ func startStats(c *Config) error {
 
     switch mType {
     case "graphite":
-        startGraphiteStats(interval, c)
+        startGraphiteStats(interval, c, configTest)
     case "prometheus":
-        startPrometheusStats(interval, c)
+        startPrometheusStats(interval, c, configTest)
     default:
         return fmt.Errorf("stats.type was not understood: %s", mType)
     }
@@ -43,7 +43,7 @@ func startStats(c *Config) error {
     return nil
 }
 
-func startGraphiteStats(i time.Duration, c *Config) error {
+func startGraphiteStats(i time.Duration, c *Config, configTest bool) error {
     proto := c.GetString("stats.protocol", "tcp")
     host := c.GetString("stats.host", "")
     if host == "" {
@@ -57,11 +57,13 @@ func startGraphiteStats(i time.Duration, c *Config) error {
     }
 
     l.Infof("Starting graphite. Interval: %s, prefix: %s, addr: %s", i, prefix, addr)
-    go graphite.Graphite(metrics.DefaultRegistry, i, prefix, addr)
+    if !configTest {
+        go graphite.Graphite(metrics.DefaultRegistry, i, prefix, addr)
+    }
     return nil
 }
 
-func startPrometheusStats(i time.Duration, c *Config) error {
+func startPrometheusStats(i time.Duration, c *Config, configTest bool) error {
     namespace := c.GetString("stats.namespace", "")
     subsystem := c.GetString("stats.subsystem", "")
 
@@ -79,11 +81,13 @@ func startPrometheusStats(i time.Duration, c *Config) error {
     pClient := mp.NewPrometheusProvider(metrics.DefaultRegistry, namespace, subsystem, pr, i)
     go pClient.UpdatePrometheusMetrics()
 
-    go func() {
-        l.Infof("Prometheus stats listening on %s at %s", listen, path)
-        http.Handle(path, promhttp.HandlerFor(pr, promhttp.HandlerOpts{ErrorLog: l}))
-        log.Fatal(http.ListenAndServe(listen, nil))
-    }()
+    if !configTest {
+        go func() {
+            l.Infof("Prometheus stats listening on %s at %s", listen, path)
+            http.Handle(path, promhttp.HandlerFor(pr, promhttp.HandlerOpts{ErrorLog: l}))
+            log.Fatal(http.ListenAndServe(listen, nil))
+        }()
+    }
 
     return nil
 }
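In stats.go the configuration is still parsed in full during a config test, so an unknown stats.type still returns an error and fails the test; only the side effects are gated, meaning the graphite sender goroutine and the Prometheus HTTP listener are not started when configTest is true.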