#define LINUX_VERSION_CODE 263682
+#define CPUMAP_MAX_CPUS 64
+
struct vlan_hdr {
__u16 h_vlan_TCI;
__u16 h_vlan_encapsulated_proto;
.max_entries = 32768,
};
+/* Special map type that can XDP_REDIRECT frames to another CPU.
+ * Key: logical CPU id. Value: per-CPU queue size handed to the kernel
+ * (see queue_size in the userspace loader, EBPFAddCPUToMap). */
+struct bpf_map_def SEC("maps") cpu_map = {
+ .type = BPF_MAP_TYPE_CPUMAP,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = CPUMAP_MAX_CPUS,
+};
+
+/* Maps a dense slot index [0, cpus_count) to the actual CPU id that
+ * a frame may be redirected to; filled by userspace from the
+ * 'xdp-cpu-redirect' configuration. */
+struct bpf_map_def SEC("maps") cpus_available = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = CPUMAP_MAX_CPUS,
+};
+
+/* Single-entry array (key 0) holding the number of CPUs stored in
+ * cpus_available. A count of 0 disables CPU redirect: the XDP program
+ * falls back to XDP_PASS. */
+struct bpf_map_def SEC("maps") cpus_count = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(__u32),
+ .value_size = sizeof(__u32),
+ .max_entries = 1,
+};
+
static __always_inline int get_sport(void *trans_data, void *data_end,
uint8_t protocol)
{
int sport;
struct flowv4_keys tuple;
struct pair *value;
+ uint32_t cpu_dest;
+ uint32_t key0 = 0;
+ uint32_t *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
+ uint32_t *cpu_selected;
if ((void *)(iph + 1) > data_end)
return XDP_PASS;
return XDP_DROP;
}
- return XDP_PASS;
+
+ if (cpu_max && *cpu_max) {
+ cpu_dest = (tuple.src + tuple.dst) % *cpu_max;
+ cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
+ if (!cpu_selected)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_selected;
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+ } else {
+ return XDP_PASS;
+ }
}
static int __always_inline filter_ipv6(void *data, __u64 nh_off, void *data_end)
int sport;
struct flowv6_keys tuple;
struct pair *value;
+ uint32_t cpu_dest;
+ uint32_t key0 = 0;
+ int *cpu_max = bpf_map_lookup_elem(&cpus_count, &key0);
+ uint32_t *cpu_selected;
if ((void *)(ip6h + 1) > data_end)
return 0;
value->time = bpf_ktime_get_ns();
return XDP_DROP;
}
- return XDP_PASS;
+ if (cpu_max && *cpu_max) {
+ cpu_dest = (tuple.src[0] + tuple.dst[0] + tuple.src[3] + tuple.dst[3]) % *cpu_max;
+ cpu_selected = bpf_map_lookup_elem(&cpus_available, &cpu_dest);
+ if (!cpu_selected)
+ return XDP_ABORTED;
+ cpu_dest = *cpu_selected;
+ return bpf_redirect_map(&cpu_map, cpu_dest, 0);
+ } else {
+ return XDP_PASS;
+ }
}
int SEC("xdp") xdp_hashfilter(struct xdp_md *ctx)
if (ret != 0) {
SCLogWarning(SC_ERR_INVALID_VALUE,
"Error when setting up XDP");
+ } else {
+ /* Try to get the xdp-cpu-redirect key */
+ const char *cpuset;
+ if (ConfGetChildValueWithDefault(if_root, if_default,
+ "xdp-cpu-redirect", &cpuset) == 1) {
+ SCLogConfig("Setting up CPU map XDP");
+ ConfNode *node = ConfGetChildWithDefault(if_root, if_default, "xdp-cpu-redirect");
+ if (node == NULL) {
+ SCLogError(SC_ERR_INVALID_VALUE, "Should not be there");
+ } else {
+ EBPFBuildCPUSet(node, aconf->iface);
+ }
+ } else {
+ /* It will just set CPU count to 0 */
+ EBPFBuildCPUSet(NULL, aconf->iface);
+ }
}
}
#else
g_livedev_storage_id = LiveDevStorageRegister("bpfmap", sizeof(void *), NULL, BpfMapsInfoFree);
}
+
+#ifdef HAVE_PACKET_XDP
+
+static uint32_t g_redirect_iface_cpu_counter = 0;
+
+/**
+ * \brief Register one CPU with the XDP CPU-redirect maps of an interface.
+ *
+ * Creates the kernel CPUMAP entry for CPU \a i (the value is the per-CPU
+ * frame queue size) and records the CPU id in the 'cpus_available' array
+ * at the next free slot (g_redirect_iface_cpu_counter). The caller is
+ * responsible for bumping the counter on success.
+ *
+ * \param iface interface name used to look up the pinned eBPF maps
+ * \param i     CPU id to add
+ * \retval 0 on success, -1 on error (already logged)
+ */
+static int EBPFAddCPUToMap(const char *iface, uint32_t i)
+{
+    int cpumap = EBPFGetMapFDByName(iface, "cpu_map");
+    uint32_t queue_size = 4096; /* frames queued per CPU before drop */
+    int ret;
+
+    if (cpumap < 0) {
+        SCLogError(SC_ERR_AFP_CREATE, "Can't find cpu_map");
+        return -1;
+    }
+    ret = bpf_map_update_elem(cpumap, &i, &queue_size, 0);
+    if (ret) {
+        SCLogError(SC_ERR_AFP_CREATE, "Create CPU entry failed (err:%d)", ret);
+        return -1;
+    }
+    int cpus_available = EBPFGetMapFDByName(iface, "cpus_available");
+    if (cpus_available < 0) {
+        SCLogError(SC_ERR_AFP_CREATE, "Can't find cpus_available map");
+        return -1;
+    }
+
+    /* distinct message from the cpu_map failure above so logs identify
+     * which map update actually failed */
+    ret = bpf_map_update_elem(cpus_available, &g_redirect_iface_cpu_counter, &i, 0);
+    if (ret) {
+        SCLogError(SC_ERR_AFP_CREATE, "Create CPU availability entry failed (err:%d)", ret);
+        return -1;
+    }
+    return 0;
+}
+
+/* Callback for BuildCpusetWithCallback: adds CPU 'i' to the redirect maps
+ * of the interface passed as 'data', counting each successful add. */
+static void EBPFRedirectMapAddCPU(int i, void *data)
+{
+    if (EBPFAddCPUToMap(data, i) == 0) {
+        g_redirect_iface_cpu_counter++;
+        return;
+    }
+    SCLogError(SC_ERR_INVALID_VALUE,
+            "Unable to add CPU %d to set", i);
+}
+
+/**
+ * \brief Build the CPU set used for XDP CPU redirect on an interface.
+ *
+ * Resets the global CPU slot counter, walks the 'xdp-cpu-redirect'
+ * config node adding each listed CPU to the eBPF maps, then publishes
+ * the final CPU count into the 'cpus_count' map. Passing a NULL node
+ * publishes a count of 0, which disables CPU redirect in the XDP
+ * program.
+ *
+ * \param node  config node listing the CPUs, or NULL to disable
+ * \param iface interface name used to look up the pinned eBPF maps
+ */
+void EBPFBuildCPUSet(ConfNode *node, char *iface)
+{
+    uint32_t key0 = 0;
+    int mapfd = EBPFGetMapFDByName(iface, "cpus_count");
+    if (mapfd < 0) {
+        SCLogError(SC_ERR_INVALID_VALUE,
+                "Unable to find 'cpus_count' map");
+        return;
+    }
+    g_redirect_iface_cpu_counter = 0;
+    if (node == NULL) {
+        /* no configuration: publish 0 so the XDP program just passes */
+        if (bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
+                            BPF_ANY) != 0) {
+            SCLogError(SC_ERR_INVALID_VALUE,
+                    "Unable to reset 'cpus_count' map");
+        }
+        return;
+    }
+    BuildCpusetWithCallback("xdp-cpu-redirect", node,
+            EBPFRedirectMapAddCPU,
+            iface);
+    /* check the publish: a silent failure here would leave the XDP
+     * program reading a stale CPU count */
+    if (bpf_map_update_elem(mapfd, &key0, &g_redirect_iface_cpu_counter,
+                        BPF_ANY) != 0) {
+        SCLogError(SC_ERR_INVALID_VALUE,
+                "Unable to set 'cpus_count' map");
+    }
+}
+
+#endif /* HAVE_PACKET_XDP */
+
#endif
-/* Copyright (C) 2016 Open Information Security Foundation
+/* Copyright (C) 2018 Open Information Security Foundation
*
* You can copy, redistribute or modify this Program under the terms of
* the GNU General Public License version 2 as published by the Free
void EBPFRegisterExtension(void);
+void EBPFBuildCPUSet(ConfNode *node, char *iface);
+
#endif
#endif