update tests for new consul packages
Reuse the running consul server for all tests. Update the lostLockConnection test, since the api client should no longer lose a lock immediately on network errors.
parent 3a03d3683e
commit cf54ca3b0f
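The diff below consolidates server startup into TestMain so that a single Consul test server is shared by every test in the package. What follows is a minimal sketch of that pattern, pieced together from the hunks below; the import path is the one in use at the time of this commit (newer Consul releases ship testutil under github.com/hashicorp/consul/sdk/testutil), and the sketch omits the log-discarding config present in the actual code.

package consul

import (
	"fmt"
	"os"
	"testing"

	"github.com/hashicorp/consul/testutil"
)

// srv is the package-level server every test reaches via srv.HTTPAddr.
var srv *testutil.TestServer

func TestMain(m *testing.M) {
	// Opt-in gate: these tests launch a real consul binary from the PATH.
	if os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == "" {
		fmt.Println("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
		return
	}

	// Assign to the package-level srv; `srv, err := ...` would shadow it.
	var err error
	srv, err = newConsulTestServer()
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	// Run the whole package against the one server, then stop it.
	// os.Exit skips deferred calls, so Stop is called explicitly first.
	rc := m.Run()
	srv.Stop()
	os.Exit(rc)
}

func newConsulTestServer() (*testutil.TestServer, error) {
	return testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
		c.LogLevel = "warn" // keep test output quiet
	})
}

Individual tests then drop their per-test newConsulTestServer/srv.Stop calls and read the address from the shared srv, which is what most of the hunks below remove.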
@@ -15,14 +15,28 @@ func TestBackend_impl(t *testing.T) {
 	var _ backend.Backend = new(Backend)
 }
 
-func newConsulTestServer(t *testing.T) *testutil.TestServer {
-	skip := os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == ""
-	if skip {
-		t.Log("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
-		t.Skip()
-	}
-
-	srv, err := testutil.NewTestServerConfigT(t, func(c *testutil.TestServerConfig) {
+var srv *testutil.TestServer
+
+func TestMain(m *testing.M) {
+	if os.Getenv("TF_ACC") == "" && os.Getenv("TF_CONSUL_TEST") == "" {
+		fmt.Println("consul server tests require setting TF_ACC or TF_CONSUL_TEST")
+		return
+	}
+
+	var err error
+	srv, err = newConsulTestServer()
+	if err != nil {
+		fmt.Println(err)
+		os.Exit(1)
+	}
+
+	rc := m.Run()
+	srv.Stop()
+	os.Exit(rc)
+}
+
+func newConsulTestServer() (*testutil.TestServer, error) {
+	srv, err := testutil.NewTestServerConfig(func(c *testutil.TestServerConfig) {
 		c.LogLevel = "warn"
 
 		if !testing.Verbose() {
@@ -31,17 +45,10 @@ func newConsulTestServer(t *testing.T) *testutil.TestServer {
 		}
 	})
 
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return srv
+	return srv, err
 }
 
 func TestBackend(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	path := fmt.Sprintf("tf-unit/%s", time.Now().String())
 
 	// Get the backend. We need two to test locking.
@@ -60,9 +67,6 @@ func TestBackend(t *testing.T) {
 }
 
 func TestBackend_lockDisabled(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	path := fmt.Sprintf("tf-unit/%s", time.Now().String())
 
 	// Get the backend. We need two to test locking.
@@ -83,9 +87,6 @@ func TestBackend_lockDisabled(t *testing.T) {
 }
 
 func TestBackend_gzip(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	// Get the backend
 	b := backend.TestBackendConfig(t, New(), map[string]interface{}{
 		"address": srv.HTTPAddr,
@@ -19,9 +19,6 @@ func TestRemoteClient_impl(t *testing.T) {
 }
 
 func TestRemoteClient(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	// Get the backend
 	b := backend.TestBackendConfig(t, New(), map[string]interface{}{
 		"address": srv.HTTPAddr,
@@ -40,9 +37,6 @@ func TestRemoteClient(t *testing.T) {
 
 // test the gzip functionality of the client
 func TestRemoteClient_gzipUpgrade(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	statePath := fmt.Sprintf("tf-unit/%s", time.Now().String())
 
 	// Get the backend
@@ -78,9 +72,6 @@ func TestRemoteClient_gzipUpgrade(t *testing.T) {
 }
 
 func TestConsul_stateLock(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	path := fmt.Sprintf("tf-unit/%s", time.Now().String())
 
 	// create 2 instances to get 2 remote.Clients
@@ -104,9 +95,6 @@ func TestConsul_stateLock(t *testing.T) {
 }
 
 func TestConsul_destroyLock(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	// Get the backend
 	b := backend.TestBackendConfig(t, New(), map[string]interface{}{
 		"address": srv.HTTPAddr,
@@ -144,9 +132,6 @@ func TestConsul_destroyLock(t *testing.T) {
 }
 
 func TestConsul_lostLock(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	path := fmt.Sprintf("tf-unit/%s", time.Now().String())
 
 	// create 2 instances to get 2 remote.Clients
@@ -194,9 +179,6 @@ func TestConsul_lostLock(t *testing.T) {
 }
 
 func TestConsul_lostLockConnection(t *testing.T) {
-	srv := newConsulTestServer(t)
-	defer srv.Stop()
-
 	// create an "unreliable" network by closing all the consul client's
 	// network connections
 	conns := &unreliableConns{}
@@ -225,31 +207,17 @@ func TestConsul_lostLockConnection(t *testing.T) {
 		t.Fatal(err)
 	}
 
-	// set a callback to know when the monitor loop re-connects
-	dialed := make(chan struct{})
-	conns.dialCallback = func() {
-		close(dialed)
-		conns.dialCallback = nil
-	}
-
-	// kill any open connections
-	// once the consul client is fixed, we should loop over this a few time to
-	// be sure, since we can't hook into the client's internal lock monitor
-	// loop.
-	conns.Kill()
-	// wait for a new connection to be dialed, and kill it again
-	<-dialed
-	conns.Kill()
-
-	// since the lock monitor loop is hidden in the consul api client, we can
-	// only wait a bit to make sure we were notified of the failure
-	time.Sleep(time.Second)
-
-	// once the consul client can reconnect properly, there will no longer be
-	// an error here
-	//if err := s.Unlock(id); err != nil {
-	if err := s.Unlock(id); err != lostLockErr {
-		t.Fatalf("expected lost lock error, got %v", err)
-	}
+	// kill the connection a few times
+	for i := 0; i < 3; i++ {
+		dialed := conns.dialedDone()
+		// kill any open connections
+		conns.Kill()
+		// wait for a new connection to be dialed, and kill it again
+		<-dialed
+	}
+
+	if err := s.Unlock(id); err != nil {
+		t.Fatal("unlock error:", err)
+	}
 }
 
@@ -278,6 +246,18 @@ func (u *unreliableConns) DialContext(ctx context.Context, netw, addr string) (n
 	return conn, nil
 }
 
+func (u *unreliableConns) dialedDone() chan struct{} {
+	u.Lock()
+	defer u.Unlock()
+	dialed := make(chan struct{})
+	u.dialCallback = func() {
+		defer close(dialed)
+		u.dialCallback = nil
+	}
+
+	return dialed
+}
+
 // Kill these with a deadline, just to make sure we don't end up with any EOFs
 // that get ignored.
 func (u *unreliableConns) Kill() {