Skip to content
Draft
24 changes: 24 additions & 0 deletions doc/userguide/configuration/suricata-yaml.rst
Original file line number Diff line number Diff line change
Expand Up @@ -3025,6 +3025,30 @@ default.
Using this default setting, flows will be associated only if the compared packet
headers are encapsulated in the same number of headers.

Tunnels
~~~~~~~

If your packet sources are multiple tunnels encapsulating the traffic,
you can configure the ``decoder.tunnels`` section to assign a tunnel
identifier to each of these tunnels.

These tunnel identifiers are used in flow hashing to distinguish flows that
look identical (same 5-tuple) but arrive over different tunnels, and
therefore belong to different subnetworks (similar to a VLAN identifier).

This section is a list of tunnels with the following parameters:
::

- id: 1
type: erspan2 # or vxlan
src: 192.168.1.1
dst: 192.168.1.3
session: 123 # erspan span id or vxlan vni

It is also recommended to define the ``decoder.tunnels-ifaces`` list of interfaces
receiving tunneled traffic. Traffic received on these interfaces that does
not belong to a defined tunnel will be skipped.

Advanced Options
----------------

Expand Down
257 changes: 252 additions & 5 deletions ebpf/xdp_filter.c
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,10 @@
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_vlan.h>
/* Workaround to avoid the need of 32bit headers */
#define _LINUX_IF_H
#define IFNAMSIZ 16
#include <linux/if_tunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
Expand Down Expand Up @@ -61,11 +65,21 @@
* also be used as workaround of some hardware offload issue */
#define VLAN_TRACKING 1

/* vxlan port configurable */
#define VXLAN_PORT 4789

struct vlan_hdr {
__u16 h_vlan_TCI;
__u16 h_vlan_encapsulated_proto;
};

struct flowtunnel_keys {
__u32 src;
__u32 dst;
__u32 session : 24;
__u8 tunnel : 8;
};

struct flowv4_keys {
__u32 src;
__u32 dst;
Expand All @@ -75,7 +89,8 @@ struct flowv4_keys {
};
__u8 ip_proto:1;
__u16 vlan0:15;
__u16 vlan1;
__u8 tunnel : 1;
__u16 vlan1_or_tunnel_id : 15;
};

struct flowv6_keys {
Expand All @@ -87,14 +102,30 @@ struct flowv6_keys {
};
__u8 ip_proto:1;
__u16 vlan0:15;
__u16 vlan1;
__u8 tunnel : 1;
__u16 vlan1_or_tunnel_id : 15;
};

struct pair {
__u64 packets;
__u64 bytes;
};

struct flowtunnel_id {
__u16 tunnel_id;
};

struct {
#if USE_PERCPU_HASH
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
#else
__uint(type, BPF_MAP_TYPE_HASH);
#endif
__type(key, struct flowtunnel_keys);
__type(value, struct flowtunnel_id);
__uint(max_entries, 256);
} flow_table_tunnels SEC(".maps");

struct {
#if USE_PERCPU_HASH
__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
Expand Down Expand Up @@ -232,7 +263,8 @@ static __always_inline int get_dport(void *trans_data, void *data_end,
}
}

static int __always_inline filter_ipv4(struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, __u16 vlan0, __u16 vlan1)
static int __always_inline filter_ipv4_final(
struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, __u16 vlan0, __u16 vlan1)
{
struct iphdr *iph = data + nh_off;
int dport;
Expand Down Expand Up @@ -280,7 +312,8 @@ static int __always_inline filter_ipv4(struct xdp_md *ctx, void *data, __u64 nh_
tuple.port16[1] = (__u16)dport;

tuple.vlan0 = vlan0;
tuple.vlan1 = vlan1;
tuple.tunnel = (vlan1 & 0x8000) != 0;
tuple.vlan1_or_tunnel_id = vlan1 & 0x7FFF;

value = bpf_map_lookup_elem(&flow_table_v4, &tuple);
#if 0
Expand Down Expand Up @@ -421,7 +454,8 @@ static int __always_inline filter_ipv6(struct xdp_md *ctx, void *data, __u64 nh_
tuple.port16[1] = dport;

tuple.vlan0 = vlan0;
tuple.vlan1 = vlan1;
tuple.tunnel = (vlan1 & 0x8000) != 0;
tuple.vlan1_or_tunnel_id = vlan1 & 0x7FFF;

value = bpf_map_lookup_elem(&flow_table_v6, &tuple);
if (value) {
Expand Down Expand Up @@ -482,6 +516,219 @@ static int __always_inline filter_ipv6(struct xdp_md *ctx, void *data, __u64 nh_
#endif
}

static int __always_inline filter_erspan(
struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, struct iphdr *iph)
{
struct erspan_hdr {
__be16 ver_vlan;
__be16 flags_spanid;
__be32 padding;
};
__u16 vlan0 = 0;
__u16 vlan1 = 0;
struct flowtunnel_keys tuple;
__u16 h_proto;
__u16 flags_spanid;
struct flowtunnel_id *value;

struct erspan_hdr *erhdr = (struct erspan_hdr *)(data + nh_off);
if ((void *)(erhdr + 1) > data_end)
return XDP_PASS;

if ((erhdr->ver_vlan & 0xF0) != 0x10) {
// only handle ERSPAN 2
return XDP_PASS;
}
flags_spanid = erhdr->flags_spanid;
if ((flags_spanid & 0x1800) == 0x800) {
// do not handle ISL encapsulated
return XDP_PASS;
}

tuple.tunnel = 4; // DECODE_TUNNEL_ERSPANII
tuple.src = iph->saddr;
tuple.dst = iph->daddr;
tuple.session = flags_spanid & 0x3FF;
value = bpf_map_lookup_elem(&flow_table_tunnels, &tuple);
if (!value) {
// unknown tunnel
return XDP_PASS;
}
vlan1 = 0x8000 | value->tunnel_id;

nh_off += 8;
if (data + nh_off + sizeof(struct ethhdr) > data_end)
return XDP_PASS;

struct ethhdr *eth = data + nh_off;
nh_off += sizeof(*eth);

h_proto = eth->h_proto;

#if VLAN_TRACKING
if ((flags_spanid & 0x1800) == 0x1000) {
vlan0 = erhdr->ver_vlan & 0xFFF;
}
#endif
if ((flags_spanid & 0x1800) == 0x1800 && (h_proto == __constant_htons(ETH_P_8021Q) ||
h_proto == __constant_htons(ETH_P_8021AD))) {
struct vlan_hdr *vhdr;

if (data + nh_off + sizeof(struct vlan_hdr) > data_end)
return XDP_PASS;
vhdr = data + nh_off;
nh_off += sizeof(struct vlan_hdr);
h_proto = vhdr->h_vlan_encapsulated_proto;
#if VLAN_TRACKING
vlan0 = vhdr->h_vlan_TCI & 0x0fff;
#endif
}
if (h_proto == __constant_htons(ETH_P_IP))
return filter_ipv4_final(ctx, data, nh_off, data_end, vlan0, vlan1);
else if (h_proto == __constant_htons(ETH_P_IPV6))
return filter_ipv6(ctx, data, nh_off, data_end, vlan0, vlan1);
return XDP_PASS;
}

static int __always_inline filter_gre(
struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, struct iphdr *iph)
{
struct gre_hdr {
__be16 flags;
__be16 proto;
};
__u16 proto;

struct gre_hdr *grhdr = (struct gre_hdr *)(data + nh_off);

if ((void *)(grhdr + 1) > data_end)
return XDP_PASS;

// only GRE version 0 without routing
if (grhdr->flags & (GRE_VERSION | GRE_ROUTING))
return XDP_PASS;

nh_off += 4;
if (grhdr->flags & GRE_CSUM)
nh_off += 4;
if (grhdr->flags & GRE_KEY)
nh_off += 4;
if (grhdr->flags & GRE_SEQ)
nh_off += 4;
if (data + nh_off > data_end)
return XDP_PASS;

proto = grhdr->proto;
// only handle erspan over gre
if (proto == __constant_htons(ETH_P_ERSPAN)) {
return filter_erspan(ctx, data, nh_off, data_end, iph);
}
return XDP_PASS;
}

struct vxlanhdr {
__be16 flags;
__be16 gdp;
__u8 vni0;
__u8 vni1;
__u8 vni2;
__u8 res;
};

static int __always_inline filter_vxlan(
struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, struct iphdr *iph)
{
__u16 vlan0 = 0;
__u16 vlan1;
__u16 h_proto;
struct flowtunnel_keys tuple;
struct flowtunnel_id *value;

struct vxlanhdr *vh = (struct vxlanhdr *)(data + nh_off);

tuple.tunnel = 6; // DECODE_TUNNEL_VXLAN
tuple.src = iph->saddr;
tuple.dst = iph->daddr;
tuple.session = vh->vni2 | (vh->vni1 << 8) | (vh->vni0 << 16);
value = bpf_map_lookup_elem(&flow_table_tunnels, &tuple);
if (!value) {
// unknown tunnel
return XDP_PASS;
}
vlan1 = 0x8000 | value->tunnel_id;
nh_off += sizeof(*vh);

struct ethhdr *eth = data + nh_off;
nh_off += sizeof(*eth);
h_proto = eth->h_proto;

if (h_proto == __constant_htons(ETH_P_8021Q) || h_proto == __constant_htons(ETH_P_8021AD)) {
struct vlan_hdr *vhdr;

if (data + nh_off + sizeof(struct vlan_hdr) > data_end)
return XDP_PASS;
vhdr = data + nh_off;
nh_off += sizeof(struct vlan_hdr);
h_proto = vhdr->h_vlan_encapsulated_proto;
#if VLAN_TRACKING
vlan0 = vhdr->h_vlan_TCI & 0x0fff;
#endif
}

if (h_proto == __constant_htons(ETH_P_IP))
return filter_ipv4_final(ctx, data, nh_off, data_end, vlan0, vlan1);
else if (h_proto == __constant_htons(ETH_P_IPV6))
return filter_ipv6(ctx, data, nh_off, data_end, vlan0, vlan1);
return XDP_PASS;
}

static int __always_inline is_vxlan(void *data, __u64 nh_off, void *data_end)
{
if (data + nh_off + sizeof(struct iphdr) + sizeof(struct udphdr) + sizeof(struct vxlanhdr) +
sizeof(struct ethhdr) >
data_end) {
return 0;
}
struct udphdr *uh = (struct udphdr *)(data + nh_off + sizeof(struct iphdr));
if (uh->dest != __constant_ntohs(VXLAN_PORT)) {
return 0;
}
struct vxlanhdr *vh =
(struct vxlanhdr *)(data + nh_off + sizeof(struct iphdr) + sizeof(struct udphdr));
// check vni is present and reserved is 0
if ((vh->flags & 0xDEFF) == 8 && vh->res == 0) {
return 0;
}
// check ethernet type is handled
struct ethhdr *eth = (struct ethhdr *)(data + nh_off + sizeof(struct iphdr) +
sizeof(struct udphdr) + sizeof(struct vxlanhdr));
if (eth->h_proto == __constant_htons(ETH_P_8021Q) ||
eth->h_proto == __constant_htons(ETH_P_8021AD) ||
eth->h_proto == __constant_htons(ETH_P_IP) ||
eth->h_proto == __constant_htons(ETH_P_IPV6)) {
return 1;
}

return 0;
}

static int __always_inline filter_ipv4(
struct xdp_md *ctx, void *data, __u64 nh_off, void *data_end, __u16 vlan0, __u16 vlan1)
{
struct iphdr *iph = data + nh_off;
if ((void *)(iph + 1) > data_end)
return XDP_PASS;

if (iph->protocol == IPPROTO_GRE) {
nh_off += sizeof(struct iphdr);
return filter_gre(ctx, data, nh_off, data_end, iph);
} else if (iph->protocol == IPPROTO_UDP && is_vxlan(data, nh_off, data_end)) {
nh_off += sizeof(struct iphdr) + sizeof(struct udphdr);
return filter_vxlan(ctx, data, nh_off, data_end, iph);
}
return filter_ipv4_final(ctx, data, nh_off, data_end, vlan0, vlan1);
}

int SEC("xdp") xdp_hashfilter(struct xdp_md *ctx)
{
void *data_end = (void *)(long)ctx->data_end;
Expand Down
8 changes: 8 additions & 0 deletions etc/schema.json
Original file line number Diff line number Diff line change
Expand Up @@ -7839,9 +7839,17 @@
},
"src_port": {
"type": "integer"
},
"tunnel_id": {
"type": "integer",
"description": "If any, the tunnel identifier defined in suricata.yaml decoder.tunnels section"
}
}
},
"tunnel_id": {
"type": "integer",
"description": "if any, the tunnel identifier defined in suricata.yaml decoder.tunnels section"
},
"tx_guessed": {
"type": "boolean",
"description":
Expand Down
Loading
Loading