Add tests and README
This commit is contained in:
@@ -175,43 +175,18 @@ func (c *ClusterConfig) Replace(incoming *ClusterConfig) (bool, error) {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// FindCheck returns the check with the given ID or name.
|
||||
func (c *ClusterConfig) FindCheck(idOrName string) (*Check, int) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
for i := range c.Checks {
|
||||
if c.Checks[i].ID == idOrName || c.Checks[i].Name == idOrName {
|
||||
cp := c.Checks[i]
|
||||
return &cp, i
|
||||
}
|
||||
}
|
||||
return nil, -1
|
||||
}
|
||||
|
||||
// FindAlert returns the alert with the given ID or name.
|
||||
func (c *ClusterConfig) FindAlert(idOrName string) (*Alert, int) {
|
||||
// FindAlert returns the alert with the given ID or name, or nil if
|
||||
// no entry matches.
|
||||
func (c *ClusterConfig) FindAlert(idOrName string) *Alert {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
for i := range c.Alerts {
|
||||
if c.Alerts[i].ID == idOrName || c.Alerts[i].Name == idOrName {
|
||||
cp := c.Alerts[i]
|
||||
return &cp, i
|
||||
return &cp
|
||||
}
|
||||
}
|
||||
return nil, -1
|
||||
}
|
||||
|
||||
// FindPeer returns the peer with the given node ID.
|
||||
func (c *ClusterConfig) FindPeer(nodeID string) (*PeerInfo, int) {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
for i := range c.Peers {
|
||||
if c.Peers[i].NodeID == nodeID {
|
||||
cp := c.Peers[i]
|
||||
return &cp, i
|
||||
}
|
||||
}
|
||||
return nil, -1
|
||||
return nil
|
||||
}
|
||||
|
||||
// QuorumSize returns the minimum number of live nodes required for
|
||||
|
||||
@@ -0,0 +1,107 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestQuorumSize(t *testing.T) {
|
||||
cases := []struct {
|
||||
peers int
|
||||
want int
|
||||
}{
|
||||
{0, 1},
|
||||
{1, 1},
|
||||
{2, 2},
|
||||
{3, 2},
|
||||
{4, 3},
|
||||
{5, 3},
|
||||
{7, 4},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
c := &ClusterConfig{}
|
||||
for i := 0; i < tc.peers; i++ {
|
||||
c.Peers = append(c.Peers, PeerInfo{NodeID: fmt.Sprintf("n%d", i)})
|
||||
}
|
||||
if got := c.QuorumSize(); got != tc.want {
|
||||
t.Errorf("peers=%d: QuorumSize=%d want %d", tc.peers, got, tc.want)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterMutateBumpsVersion(t *testing.T) {
|
||||
t.Setenv("QUPTIME_DIR", t.TempDir())
|
||||
c := &ClusterConfig{}
|
||||
|
||||
err := c.Mutate("nodeA", func(cc *ClusterConfig) error {
|
||||
cc.Checks = append(cc.Checks, Check{ID: "1", Name: "x"})
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c.Version != 1 {
|
||||
t.Errorf("Version=%d want 1", c.Version)
|
||||
}
|
||||
if c.UpdatedBy != "nodeA" {
|
||||
t.Errorf("UpdatedBy=%q want nodeA", c.UpdatedBy)
|
||||
}
|
||||
|
||||
err = c.Mutate("nodeB", func(cc *ClusterConfig) error { return nil })
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if c.Version != 2 {
|
||||
t.Errorf("Version=%d want 2 after second mutate", c.Version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterReplaceGatesOnVersion(t *testing.T) {
|
||||
t.Setenv("QUPTIME_DIR", t.TempDir())
|
||||
cur := &ClusterConfig{Version: 5, Checks: []Check{{ID: "old"}}}
|
||||
|
||||
if applied, _ := cur.Replace(&ClusterConfig{Version: 4}); applied {
|
||||
t.Error("older version was applied")
|
||||
}
|
||||
if applied, _ := cur.Replace(&ClusterConfig{Version: 5}); applied {
|
||||
t.Error("equal version was applied")
|
||||
}
|
||||
applied, err := cur.Replace(&ClusterConfig{
|
||||
Version: 6,
|
||||
Checks: []Check{{ID: "new"}},
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !applied {
|
||||
t.Error("newer version was not applied")
|
||||
}
|
||||
if cur.Version != 6 || len(cur.Checks) != 1 || cur.Checks[0].ID != "new" {
|
||||
t.Errorf("after replace: %+v", cur)
|
||||
}
|
||||
}
|
||||
|
||||
func TestClusterSnapshotIsCopy(t *testing.T) {
|
||||
c := &ClusterConfig{Checks: []Check{{ID: "a"}}}
|
||||
snap := c.Snapshot()
|
||||
snap.Checks[0].ID = "b"
|
||||
if c.Checks[0].ID != "a" {
|
||||
t.Error("snapshot mutation leaked back to original")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFindAlert(t *testing.T) {
|
||||
c := &ClusterConfig{Alerts: []Alert{
|
||||
{ID: "id-1", Name: "primary", Type: AlertSMTP},
|
||||
{ID: "id-2", Name: "secondary", Type: AlertDiscord},
|
||||
}}
|
||||
if a := c.FindAlert("primary"); a == nil || a.Type != AlertSMTP {
|
||||
t.Errorf("by name: %+v", a)
|
||||
}
|
||||
if a := c.FindAlert("id-2"); a == nil || a.Type != AlertDiscord {
|
||||
t.Errorf("by id: %+v", a)
|
||||
}
|
||||
if a := c.FindAlert("ghost"); a != nil {
|
||||
t.Errorf("expected nil for missing, got %+v", a)
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,58 @@
|
||||
package config
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestAdvertiseAddrFallback(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
cfg NodeConfig
|
||||
want string
|
||||
}{
|
||||
{"explicit advertise wins", NodeConfig{Advertise: "host:1234", BindAddr: "0.0.0.0", BindPort: 9001}, "host:1234"},
|
||||
{"empty bind falls back to loopback", NodeConfig{BindPort: 9001}, "127.0.0.1:9001"},
|
||||
{"wildcard bind falls back to loopback", NodeConfig{BindAddr: "0.0.0.0", BindPort: 9001}, "127.0.0.1:9001"},
|
||||
{"ipv6 wildcard falls back to loopback", NodeConfig{BindAddr: "::", BindPort: 9001}, "127.0.0.1:9001"},
|
||||
{"specific bind preserved", NodeConfig{BindAddr: "10.0.0.1", BindPort: 9001}, "10.0.0.1:9001"},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
if got := tc.cfg.AdvertiseAddr(); got != tc.want {
|
||||
t.Errorf("got %q want %q", got, tc.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeConfigRoundtrip(t *testing.T) {
|
||||
t.Setenv("QUPTIME_DIR", t.TempDir())
|
||||
n := &NodeConfig{NodeID: "abc", BindAddr: "127.0.0.1", BindPort: 9001, Advertise: "10.0.0.1:9001"}
|
||||
if err := n.Save(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
loaded, err := LoadNodeConfig()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if *loaded != *n {
|
||||
t.Errorf("got %+v want %+v", *loaded, *n)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadNodeConfigAppliesDefaults(t *testing.T) {
|
||||
t.Setenv("QUPTIME_DIR", t.TempDir())
|
||||
// Save with empty bind addr/port to verify Load fills them.
|
||||
n := &NodeConfig{NodeID: "abc"}
|
||||
if err := n.Save(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
loaded, err := LoadNodeConfig()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if loaded.BindPort != 9001 {
|
||||
t.Errorf("BindPort=%d want 9001", loaded.BindPort)
|
||||
}
|
||||
if loaded.BindAddr != "0.0.0.0" {
|
||||
t.Errorf("BindAddr=%q want 0.0.0.0", loaded.BindAddr)
|
||||
}
|
||||
}
|
||||
@@ -6,7 +6,6 @@
|
||||
// cluster.yaml — replicated state (peers, checks, alerts, version)
|
||||
// trust.yaml — local fingerprint trust store
|
||||
// keys/ — RSA private + public keys + self-signed cert
|
||||
// state.json — runtime cache (last check results, current master)
|
||||
//
|
||||
// A unix socket for the local CLI lives alongside (defaults to
|
||||
// /var/run/quptime/quptime.sock when running as root, otherwise
|
||||
@@ -25,7 +24,6 @@ const (
|
||||
NodeFile = "node.yaml"
|
||||
ClusterFile = "cluster.yaml"
|
||||
TrustFile = "trust.yaml"
|
||||
StateFile = "state.json"
|
||||
KeysDir = "keys"
|
||||
PrivateKey = "private.pem"
|
||||
PublicKey = "public.pem"
|
||||
@@ -86,9 +84,6 @@ func ClusterFilePath() string { return filepath.Join(DataDir(), ClusterFile) }
|
||||
// TrustFilePath returns the absolute path to trust.yaml.
|
||||
func TrustFilePath() string { return filepath.Join(DataDir(), TrustFile) }
|
||||
|
||||
// StateFilePath returns the absolute path to state.json.
|
||||
func StateFilePath() string { return filepath.Join(DataDir(), StateFile) }
|
||||
|
||||
// PrivateKeyPath returns the absolute path to the RSA private key.
|
||||
func PrivateKeyPath() string { return filepath.Join(DataDir(), KeysDir, PrivateKey) }
|
||||
|
||||
|
||||
Reference in New Issue
Block a user