Skip to content

Commit ecaad2c

Browse files
committed
fix(cleanup): add missing handlers for cleanup
kube-router v2.X introduced the idea of iptables and ipset handlers that allow kube-router to be dual-stack capable. However, the cleanup logic for the various controllers was not properly ported when this happened. When the cleanup functions run, they often have not had their controllers fully initialized, as cleanup should not be dependent on kube-router being able to reach a kube-apiserver. As such, they were missing these handlers, and as a result they either silently ended up doing no-ops or, worse, they would run into nil pointer failures. This corrects that, so that kube-router no longer fails this way and cleans up as it had in v1.X.
1 parent 7755b4a commit ecaad2c

File tree

4 files changed

+177
-96
lines changed

4 files changed

+177
-96
lines changed

pkg/controllers/netpol/network_policy_controller.go

Lines changed: 22 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -723,6 +723,26 @@ func (npc *NetworkPolicyController) cleanupStaleIPSets(activePolicyIPSets map[st
723723
func (npc *NetworkPolicyController) Cleanup() {
724724
klog.Info("Cleaning up NetworkPolicyController configurations...")
725725

726+
if len(npc.iptablesCmdHandlers) < 1 {
727+
iptablesCmdHandlers, ipSetHandlers, err := NewIPTablesHandlers(nil)
728+
if err != nil {
729+
klog.Errorf("unable to get iptables and ipset handlers: %v", err)
730+
return
731+
}
732+
npc.iptablesCmdHandlers = iptablesCmdHandlers
733+
npc.ipSetHandlers = ipSetHandlers
734+
735+
// Make other structures that we rely on
736+
npc.iptablesSaveRestore = make(map[v1core.IPFamily]utils.IPTablesSaveRestorer, 2)
737+
npc.iptablesSaveRestore[v1core.IPv4Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv4Protocol)
738+
npc.iptablesSaveRestore[v1core.IPv6Protocol] = utils.NewIPTablesSaveRestore(v1core.IPv6Protocol)
739+
npc.filterTableRules = make(map[v1core.IPFamily]*bytes.Buffer, 2)
740+
var buf bytes.Buffer
741+
npc.filterTableRules[v1core.IPv4Protocol] = &buf
742+
var buf2 bytes.Buffer
743+
npc.filterTableRules[v1core.IPv6Protocol] = &buf2
744+
}
745+
726746
var emptySet map[string]bool
727747
// Take a dump (iptables-save) of the current filter table for cleanupStaleRules() to work on
728748
for ipFamily, iptablesSaveRestore := range npc.iptablesSaveRestore {
@@ -762,7 +782,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
762782
iptablesCmdHandlers := make(map[v1core.IPFamily]utils.IPTablesHandler, 2)
763783
ipSetHandlers := make(map[v1core.IPFamily]utils.IPSetHandler, 2)
764784

765-
if config.EnableIPv4 {
785+
if config == nil || config.EnableIPv4 {
766786
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
767787
if err != nil {
768788
return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)
@@ -775,7 +795,7 @@ func NewIPTablesHandlers(config *options.KubeRouterConfig) (
775795
}
776796
ipSetHandlers[v1core.IPv4Protocol] = ipset
777797
}
778-
if config.EnableIPv6 {
798+
if config == nil || config.EnableIPv6 {
779799
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
780800
if err != nil {
781801
return nil, nil, fmt.Errorf("failed to create iptables handler: %w", err)

pkg/controllers/proxy/hairpin_controller.go

Lines changed: 2 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -75,15 +75,14 @@ func (hpc *hairpinController) ensureHairpinEnabledForPodInterface(endpointIP str
7575
// WARN: This method is deprecated and will be removed once docker-shim is removed from kubelet.
7676
pid, err = hpc.nsc.ln.getContainerPidWithDocker(containerID)
7777
if err != nil {
78-
return fmt.Errorf("failed to prepare endpoint %s to do direct server return due to %v",
79-
endpointIP, err)
78+
return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
8079
}
8180
} else {
8281
// We expect CRI compliant runtimes here
8382
// ugly workaround, refactoring of pkg/Proxy is required
8483
pid, err = hpc.nsc.ln.getContainerPidWithCRI(hpc.nsc.dsr.runtimeEndpoint, containerID)
8584
if err != nil {
86-
return fmt.Errorf("failed to prepare endpoint %s to do DSR due to: %v", endpointIP, err)
85+
return fmt.Errorf("failed to get pod's (%s) pid for hairpinning due to %v", endpointIP, err)
8786
}
8887
}
8988
klog.V(2).Infof("Found PID %d for endpoint IP %s", pid, endpointIP)

pkg/controllers/proxy/network_services_controller.go

Lines changed: 91 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -1782,6 +1782,16 @@ func (nsc *NetworkServicesController) Cleanup() {
17821782
handle.Close()
17831783
}
17841784

1785+
// In prep for further steps make sure that ipset and iptables handlers are created
1786+
if len(nsc.iptablesCmdHandlers) < 1 {
1787+
// Even though we have a config at this point (via passed param), we want to send nil so that the node will
1788+
// discover which IP address families it has and act accordingly
1789+
err = nsc.setupHandlers(nil, nil)
1790+
if err != nil {
1791+
klog.Errorf("could not cleanup because we couldn't create iptables/ipset command handlers due to: %v", err)
1792+
}
1793+
}
1794+
17851795
// cleanup iptables masquerade rule
17861796
err = nsc.deleteMasqueradeIptablesRule()
17871797
if err != nil {
@@ -1790,15 +1800,21 @@ func (nsc *NetworkServicesController) Cleanup() {
17901800
}
17911801

17921802
// cleanup iptables hairpin rules
1793-
err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol)
1794-
if err != nil {
1795-
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
1796-
return
1803+
if _, ok := nsc.iptablesCmdHandlers[v1.IPv4Protocol]; ok {
1804+
klog.Info("Processing IPv4 hairpin rule cleanup")
1805+
err = nsc.deleteHairpinIptablesRules(v1.IPv4Protocol)
1806+
if err != nil {
1807+
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
1808+
return
1809+
}
17971810
}
1798-
err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol)
1799-
if err != nil {
1800-
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
1801-
return
1811+
if _, ok := nsc.iptablesCmdHandlers[v1.IPv6Protocol]; ok {
1812+
klog.Info("Processing IPv6 hairpin rule cleanup")
1813+
err = nsc.deleteHairpinIptablesRules(v1.IPv6Protocol)
1814+
if err != nil {
1815+
klog.Errorf("Failed to cleanup iptables hairpin rules: %s", err.Error())
1816+
return
1817+
}
18021818
}
18031819

18041820
nsc.cleanupIpvsFirewall()
@@ -1927,6 +1943,70 @@ func (nsc *NetworkServicesController) handleServiceDelete(obj interface{}) {
19271943
nsc.OnServiceUpdate(service)
19281944
}
19291945

1946+
// setupHandlers Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line
1947+
// options) and the node has an IPv6 address, the following method will return an IPv6 address
1948+
func (nsc *NetworkServicesController) setupHandlers(config *options.KubeRouterConfig, node *v1.Node) error {
1949+
// node being nil covers the case where this function is called by something that doesn't have a kube-apiserver
1950+
// connection like the cleanup code. In this instance we want all possible iptables and ipset handlers
1951+
if node != nil {
1952+
nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
1953+
}
1954+
1955+
// We test for nil configs as the Cleanup() method often doesn't have a valid config in this respect, so rather
1956+
// than trying to guess options, it is better to just let the logic fallthrough. For the primary path to this func,
1957+
// NewNetworkServicesController, the config will not be nil and we want to check that we have options that match
1958+
// the node's capability to ensure sanity later down the road.
1959+
if config != nil {
1960+
if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
1961+
len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
1962+
return fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
1963+
}
1964+
}
1965+
nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
1966+
if config != nil {
1967+
if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
1968+
len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
1969+
return fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
1970+
}
1971+
}
1972+
nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0
1973+
1974+
nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
1975+
nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
1976+
if node == nil || len(nsc.nodeIPv4Addrs) > 0 {
1977+
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
1978+
if err != nil {
1979+
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
1980+
return fmt.Errorf("failed to create iptables handler: %w", err)
1981+
}
1982+
nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
1983+
1984+
ipset, err := utils.NewIPSet(false)
1985+
if err != nil {
1986+
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
1987+
return fmt.Errorf("failed to create ipset handler: %w", err)
1988+
}
1989+
nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
1990+
}
1991+
if node == nil || len(nsc.nodeIPv6Addrs) > 0 {
1992+
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
1993+
if err != nil {
1994+
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
1995+
return fmt.Errorf("failed to create iptables handler: %w", err)
1996+
}
1997+
nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler
1998+
1999+
ipset, err := utils.NewIPSet(true)
2000+
if err != nil {
2001+
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
2002+
return fmt.Errorf("failed to create ipset handler: %w", err)
2003+
}
2004+
nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
2005+
}
2006+
2007+
return nil
2008+
}
2009+
19302010
// NewNetworkServicesController returns NetworkServicesController object
19312011
func NewNetworkServicesController(clientset kubernetes.Interface,
19322012
config *options.KubeRouterConfig, svcInformer cache.SharedIndexInformer,
@@ -2021,51 +2101,9 @@ func NewNetworkServicesController(clientset kubernetes.Interface,
20212101
return nil, err
20222102
}
20232103

2024-
// Here we test to see whether the node is IPv6 capable, if the user has enabled IPv6 (via command-line options)
2025-
// and the node has an IPv6 address, the following method will return an IPv6 address
2026-
nsc.nodeIPv4Addrs, nsc.nodeIPv6Addrs = utils.GetAllNodeIPs(node)
2027-
if config.EnableIPv4 && len(nsc.nodeIPv4Addrs[v1.NodeInternalIP]) < 1 &&
2028-
len(nsc.nodeIPv4Addrs[v1.NodeExternalIP]) < 1 {
2029-
return nil, fmt.Errorf("IPv4 was enabled, but no IPv4 address was found on the node")
2030-
}
2031-
nsc.isIPv4Capable = len(nsc.nodeIPv4Addrs) > 0
2032-
if config.EnableIPv6 && len(nsc.nodeIPv6Addrs[v1.NodeInternalIP]) < 1 &&
2033-
len(nsc.nodeIPv6Addrs[v1.NodeExternalIP]) < 1 {
2034-
return nil, fmt.Errorf("IPv6 was enabled, but no IPv6 address was found on the node")
2035-
}
2036-
nsc.isIPv6Capable = len(nsc.nodeIPv6Addrs) > 0
2037-
2038-
nsc.ipSetHandlers = make(map[v1.IPFamily]utils.IPSetHandler)
2039-
nsc.iptablesCmdHandlers = make(map[v1.IPFamily]utils.IPTablesHandler)
2040-
if len(nsc.nodeIPv4Addrs) > 0 {
2041-
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
2042-
if err != nil {
2043-
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
2044-
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
2045-
}
2046-
nsc.iptablesCmdHandlers[v1.IPv4Protocol] = iptHandler
2047-
2048-
ipset, err := utils.NewIPSet(false)
2049-
if err != nil {
2050-
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
2051-
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
2052-
}
2053-
nsc.ipSetHandlers[v1.IPv4Protocol] = ipset
2054-
}
2055-
if len(nsc.nodeIPv6Addrs) > 0 {
2056-
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
2057-
if err != nil {
2058-
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
2059-
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
2060-
}
2061-
nsc.iptablesCmdHandlers[v1.IPv6Protocol] = iptHandler
2062-
2063-
ipset, err := utils.NewIPSet(true)
2064-
if err != nil {
2065-
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
2066-
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
2067-
}
2068-
nsc.ipSetHandlers[v1.IPv6Protocol] = ipset
2104+
err = nsc.setupHandlers(config, node)
2105+
if err != nil {
2106+
return nil, err
20692107
}
20702108

20712109
automtu, err := utils.GetMTUFromNodeIP(nsc.primaryIP)

pkg/controllers/routing/network_routes_controller.go

Lines changed: 62 additions & 38 deletions
Original file line numberDiff line numberDiff line change
@@ -896,6 +896,17 @@ func (nrc *NetworkRoutingController) setupOverlayTunnel(tunnelName string, nextH
896896
func (nrc *NetworkRoutingController) Cleanup() {
897897
klog.Infof("Cleaning up NetworkRoutesController configurations")
898898

899+
// In prep for further steps make sure that ipset and iptables handlers are created
900+
if len(nrc.iptablesCmdHandlers) < 1 {
901+
// Even though we have a config at this point (via passed param), we want to send nil so that the node will
902+
// discover which IP address families it has and act accordingly
903+
err := nrc.setupHandlers(nil)
904+
if err != nil {
905+
klog.Errorf("could not cleanup because iptables/ipset handlers could not be created due to: %v", err)
906+
return
907+
}
908+
}
909+
899910
// Pod egress cleanup
900911
err := nrc.deletePodEgressRule()
901912
if err != nil {
@@ -1359,7 +1370,55 @@ func (nrc *NetworkRoutingController) startBgpServer(grpcServer bool) error {
13591370
return nil
13601371
}
13611372

1362-
// func (nrc *NetworkRoutingController) getExternalNodeIPs(
1373+
func (nrc *NetworkRoutingController) setupHandlers(node *v1core.Node) error {
1374+
var err error
1375+
1376+
// node being nil covers the case where this function is called by something that doesn't have a kube-apiserver
1377+
// connection like the cleanup code. In this instance we want all possible iptables and ipset handlers
1378+
if node != nil {
1379+
nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
1380+
if err != nil {
1381+
klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
1382+
"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
1383+
return fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
1384+
}
1385+
}
1386+
1387+
nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
1388+
nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
1389+
if node == nil || len(nrc.nodeIPv4Addrs) > 0 {
1390+
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
1391+
if err != nil {
1392+
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
1393+
return fmt.Errorf("failed to create iptables handler: %w", err)
1394+
}
1395+
nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
1396+
1397+
ipset, err := utils.NewIPSet(false)
1398+
if err != nil {
1399+
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
1400+
return fmt.Errorf("failed to create ipset handler: %w", err)
1401+
}
1402+
nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
1403+
}
1404+
if node == nil || len(nrc.nodeIPv6Addrs) > 0 {
1405+
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
1406+
if err != nil {
1407+
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
1408+
return fmt.Errorf("failed to create iptables handler: %w", err)
1409+
}
1410+
nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler
1411+
1412+
ipset, err := utils.NewIPSet(true)
1413+
if err != nil {
1414+
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
1415+
return fmt.Errorf("failed to create ipset handler: %w", err)
1416+
}
1417+
nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
1418+
}
1419+
1420+
return nil
1421+
}
13631422

13641423
// NewNetworkRoutingController returns new NetworkRoutingController object
13651424
func NewNetworkRoutingController(clientset kubernetes.Interface,
@@ -1473,44 +1532,9 @@ func NewNetworkRoutingController(clientset kubernetes.Interface,
14731532
}
14741533
nrc.podCidr = cidr
14751534

1476-
nrc.podIPv4CIDRs, nrc.podIPv6CIDRs, err = utils.GetPodCIDRsFromNodeSpecDualStack(node)
1535+
err = nrc.setupHandlers(node)
14771536
if err != nil {
1478-
klog.Fatalf("Failed to get pod CIDRs from node spec. kube-router relies on kube-controller-manager to"+
1479-
"allocate pod CIDRs for the node or an annotation `kube-router.io/pod-cidrs`. Error: %v", err)
1480-
return nil, fmt.Errorf("failed to get pod CIDRs detail from Node.spec: %v", err)
1481-
}
1482-
1483-
nrc.iptablesCmdHandlers = make(map[v1core.IPFamily]utils.IPTablesHandler)
1484-
nrc.ipSetHandlers = make(map[v1core.IPFamily]utils.IPSetHandler)
1485-
if len(nrc.nodeIPv4Addrs) > 0 {
1486-
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
1487-
if err != nil {
1488-
klog.Fatalf("Failed to allocate IPv4 iptables handler: %v", err)
1489-
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
1490-
}
1491-
nrc.iptablesCmdHandlers[v1core.IPv4Protocol] = iptHandler
1492-
1493-
ipset, err := utils.NewIPSet(false)
1494-
if err != nil {
1495-
klog.Fatalf("Failed to allocate IPv4 ipset handler: %v", err)
1496-
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
1497-
}
1498-
nrc.ipSetHandlers[v1core.IPv4Protocol] = ipset
1499-
}
1500-
if len(nrc.nodeIPv6Addrs) > 0 {
1501-
iptHandler, err := iptables.NewWithProtocol(iptables.ProtocolIPv6)
1502-
if err != nil {
1503-
klog.Fatalf("Failed to allocate IPv6 iptables handler: %v", err)
1504-
return nil, fmt.Errorf("failed to create iptables handler: %w", err)
1505-
}
1506-
nrc.iptablesCmdHandlers[v1core.IPv6Protocol] = iptHandler
1507-
1508-
ipset, err := utils.NewIPSet(true)
1509-
if err != nil {
1510-
klog.Fatalf("Failed to allocate IPv6 ipset handler: %v", err)
1511-
return nil, fmt.Errorf("failed to create ipset handler: %w", err)
1512-
}
1513-
nrc.ipSetHandlers[v1core.IPv6Protocol] = ipset
1537+
return nil, err
15141538
}
15151539

15161540
for _, handler := range nrc.ipSetHandlers {

0 commit comments

Comments
 (0)