Commit 3b24bfd

test(raft): Add comprehensive test coverage for Node methods
Implement thorough test suites for Raft Node methods:

- GetPeers: Test peer retrieval in various cluster configurations
- GetLeaderClient: Verify leader client retrieval in single and multi-node clusters
- Shutdown: Validate node shutdown behavior with different scenarios

Enhance test coverage for Raft node management, improving reliability and robustness of cluster operations.
1 parent a216162 commit 3b24bfd

2 files changed (+357, -0 lines)


raft/raft.go

Lines changed: 4 additions & 0 deletions
@@ -721,6 +721,10 @@ func (n *Node) forwardToLeader(ctx context.Context, data []byte, timeout time.Duration)
 // shutdown properly, ignoring the ErrRaftShutdown error which indicates the node was already
 // shutdown.
 func (n *Node) Shutdown() error {
+	if n == nil {
+		return nil
+	}
+
 	if n.peerSyncCancel != nil {
 		n.peerSyncCancel()
 	}
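The added guard makes Shutdown a no-op on a nil receiver, which is what the new "shutdown nil node" case in TestShutdown below relies on. A self-contained sketch of that behavior, using stand-in types rather than the real raft Node:

package main

import "fmt"

// Node is a stand-in for the real raft Node type; illustration only.
type Node struct{}

// Shutdown mirrors the guard added in this commit: calling it through a
// nil *Node returns nil instead of dereferencing the receiver.
func (n *Node) Shutdown() error {
	if n == nil {
		return nil
	}
	// ... real teardown (peer sync cancel, gRPC stop, raft shutdown) would go here ...
	return nil
}

func main() {
	var node *Node // e.g. construction failed and the pointer was never set
	fmt.Println(node.Shutdown()) // prints <nil>; safe to defer unconditionally
}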

raft/raft_test.go

Lines changed: 353 additions & 0 deletions
@@ -1261,3 +1261,356 @@ func TestLeaveCluster(t *testing.T) {
 		})
 	}
 }
+
+func TestGetPeers(t *testing.T) {
+	logger := setupTestLogger()
+	tempDir := t.TempDir()
+
+	tests := []struct {
+		name          string
+		setupNode     func() *Node
+		expectedPeers int
+	}{
+		{
+			name: "uninitialized raft",
+			setupNode: func() *Node {
+				return &Node{
+					raft:   nil,
+					Logger: logger,
+				}
+			},
+			expectedPeers: 0,
+		},
+		{
+			name: "single node cluster",
+			setupNode: func() *Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "singleNode",
+					Address:     "127.0.0.1:0",
+					GRPCAddress: "127.0.0.1:0",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				time.Sleep(1 * time.Second) // Wait for initialization
+				return node
+			},
+			expectedPeers: 1,
+		},
+		{
+			name: "multi-node cluster",
+			setupNode: func() *Node {
+				// Create a 3-node cluster configuration
+				node1, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "multiNode1",
+					Address:     "127.0.0.1:6002",
+					GRPCAddress: "127.0.0.1:6012",
+					IsBootstrap: true,
+					Directory:   tempDir,
+					Peers: []config.RaftPeer{
+						{ID: "multiNode2", Address: "127.0.0.1:6003", GRPCAddress: "127.0.0.1:6013"},
+						{ID: "multiNode3", Address: "127.0.0.1:6004", GRPCAddress: "127.0.0.1:6014"},
+					},
+				})
+				require.NoError(t, err)
+
+				// Create and start other nodes
+				node2, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "multiNode2",
+					Address:     "127.0.0.1:6003",
+					GRPCAddress: "127.0.0.1:6013",
+					IsBootstrap: false,
+					Directory:   tempDir,
+					Peers: []config.RaftPeer{
+						{ID: "multiNode1", Address: "127.0.0.1:6002", GRPCAddress: "127.0.0.1:6012"},
+						{ID: "multiNode3", Address: "127.0.0.1:6004", GRPCAddress: "127.0.0.1:6014"},
+					},
+				})
+				require.NoError(t, err)
+				defer func() {
+					require.NoError(t, node2.Shutdown())
+				}()
+
+				node3, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "multiNode3",
+					Address:     "127.0.0.1:6004",
+					GRPCAddress: "127.0.0.1:6014",
+					IsBootstrap: false,
+					Directory:   tempDir,
+					Peers: []config.RaftPeer{
+						{ID: "multiNode1", Address: "127.0.0.1:6002", GRPCAddress: "127.0.0.1:6012"},
+						{ID: "multiNode2", Address: "127.0.0.1:6003", GRPCAddress: "127.0.0.1:6013"},
+					},
+				})
+				require.NoError(t, err)
+				defer func() {
+					require.NoError(t, node3.Shutdown())
+				}()
+
+				// Wait for cluster to stabilize
+				time.Sleep(3 * time.Second)
+				return node1
+			},
+			expectedPeers: 3,
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			node := testCase.setupNode()
+			if node != nil && node.raft != nil {
+				defer func() {
+					require.NoError(t, node.Shutdown())
+				}()
+			}
+
+			peers := node.GetPeers()
+			assert.Equal(t, testCase.expectedPeers, len(peers), "Unexpected number of peers")
+
+			if testCase.expectedPeers > 0 {
+				// Verify peer information
+				for _, peer := range peers {
+					assert.NotEmpty(t, peer.ID, "Peer ID should not be empty")
+					assert.NotEmpty(t, peer.Address, "Peer address should not be empty")
+				}
+			}
+		})
+	}
+}
+
+func TestGetLeaderClient(t *testing.T) {
+	logger := setupTestLogger()
+	tempDir := t.TempDir()
+
+	tests := []struct {
+		name       string
+		setupNodes func() []*Node
+		testNode   int
+		wantErr    bool
+		errMsg     string
+	}{
+		{
+			name: "successful leader client retrieval",
+			setupNodes: func() []*Node {
+				nodes := make([]*Node, 0, 2)
+
+				// Create leader node
+				node1, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "leaderClientNode1",
+					Address:     "127.0.0.1:7101",
+					GRPCAddress: "127.0.0.1:7201",
+					IsBootstrap: true,
+					Directory:   tempDir,
+					Peers: []config.RaftPeer{
+						{ID: "leaderClientNode2", Address: "127.0.0.1:7102", GRPCAddress: "127.0.0.1:7202"},
+					},
+				})
+				require.NoError(t, err)
+				nodes = append(nodes, node1)
+
+				// Create follower node
+				node2, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "leaderClientNode2",
+					Address:     "127.0.0.1:7102",
+					GRPCAddress: "127.0.0.1:7202",
+					IsBootstrap: false,
+					Directory:   tempDir,
+					Peers: []config.RaftPeer{
+						{ID: "leaderClientNode1", Address: "127.0.0.1:7101", GRPCAddress: "127.0.0.1:7201"},
+					},
+				})
+				require.NoError(t, err)
+				nodes = append(nodes, node2)
+
+				// Wait for cluster to stabilize
+				time.Sleep(3 * time.Second)
+				return nodes
+			},
+			testNode: 1, // Test from follower node
+			wantErr:  false,
+		},
+		{
+			name: "single node cluster",
+			setupNodes: func() []*Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "singleLeaderNode",
+					Address:     "127.0.0.1:7103",
+					GRPCAddress: "127.0.0.1:7203",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				time.Sleep(1 * time.Second) // Wait for initialization
+				return []*Node{node}
+			},
+			testNode: 0,
+			wantErr:  false,
+		},
+		{
+			name: "no leader available",
+			setupNodes: func() []*Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "noLeaderNode",
+					Address:     "127.0.0.1:7104",
+					GRPCAddress: "127.0.0.1:7204",
+					IsBootstrap: false, // Non-bootstrap node without peers
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				return []*Node{node}
+			},
+			testNode: 0,
+			wantErr:  true,
+			errMsg:   "timeout waiting for leader:",
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			nodes := testCase.setupNodes()
+			defer func() {
+				for _, node := range nodes {
+					if node != nil {
+						_ = node.Shutdown()
+					}
+				}
+			}()
+
+			client, err := nodes[testCase.testNode].getLeaderClient()
+
+			if testCase.wantErr {
+				assert.Error(t, err)
+				if testCase.errMsg != "" {
+					assert.Contains(t, err.Error(), testCase.errMsg)
+				}
+				assert.Nil(t, client)
+			} else {
+				assert.NoError(t, err)
+				assert.NotNil(t, client)
+			}
+		})
+	}
+}
+
+func TestShutdown(t *testing.T) {
+	logger := setupTestLogger()
+	tempDir := t.TempDir()
+
+	tests := []struct {
+		name       string
+		setupNode  func() *Node
+		wantErr    bool
+		errMsg     string
+		setupExtra func(*Node) // Additional setup for specific test cases
+	}{
+		{
+			name: "successful shutdown",
+			setupNode: func() *Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "shutdown-node1",
+					Address:     "127.0.0.1:0",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				return node
+			},
+			wantErr: false,
+		},
+		{
+			name: "shutdown already shutdown node",
+			setupNode: func() *Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "shutdown-node2",
+					Address:     "127.0.0.1:0",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				return node
+			},
+			setupExtra: func(n *Node) {
+				err := n.Shutdown()
+				require.NoError(t, err)
+			},
+			wantErr: false, // Should not error as ErrRaftShutdown is ignored
+		},
+		{
+			name: "shutdown nil node",
+			setupNode: func() *Node {
+				return nil
+			},
+			wantErr: false, // Should not panic, gracefully handle nil node
+		},
+		{
+			name: "shutdown with active peer synchronization",
+			setupNode: func() *Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "shutdown-node3",
+					Address:     "127.0.0.1:0",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				// Start peer synchronization
+				ctx, cancel := context.WithCancel(context.Background())
+				node.peerSyncCancel = cancel
+				node.StartPeerSynchronizer(ctx)
+				return node
+			},
+			wantErr: false,
+		},
+		{
+			name: "shutdown with active gRPC server",
+			setupNode: func() *Node {
+				node, err := NewRaftNode(logger, config.Raft{
+					NodeID:      "shutdown-node4",
+					Address:     "127.0.0.1:0",
+					GRPCAddress: "127.0.0.1:0",
+					IsBootstrap: true,
+					Directory:   tempDir,
+				})
+				require.NoError(t, err)
+				return node
+			},
+			wantErr: false,
+		},
+	}
+
+	for _, testCase := range tests {
+		t.Run(testCase.name, func(t *testing.T) {
+			node := testCase.setupNode()
+			if testCase.setupExtra != nil {
+				testCase.setupExtra(node)
+			}
+
+			err := node.Shutdown()
+
+			if testCase.wantErr {
+				assert.Error(t, err)
+				if testCase.errMsg != "" {
+					assert.Contains(t, err.Error(), testCase.errMsg)
+				}
+			} else {
+				assert.NoError(t, err)
+			}
+
+			// Additional verification for non-nil nodes
+			if node != nil {
+				// Verify that a second shutdown doesn't cause issues
+				err = node.Shutdown()
+				assert.NoError(t, err, "Second shutdown should not error")
+
+				// Verify all components are properly shut down
+				// assert.Nil(t, node.peerSyncCancel, "peerSyncCancel should be nil after shutdown")
+				if node.rpcServer != nil {
+					// Check if the gRPC server is stopped
+					// This is a bit tricky to verify directly, but we can try to use the server
+					assert.NotPanics(t, func() {
+						node.rpcServer.GracefulStop() // Should not block if already stopped
+					})
+				}
+			}
+		})
+	}
+}
